ocp_nlp_common.c
/*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"
/************************************************
* config
************************************************/
int ocp_nlp_config_calculate_size(int N)
{
int ii;
int size = 0;
// self
size += sizeof(ocp_nlp_config);
// qp solver
size += 1 * ocp_qp_xcond_solver_config_calculate_size();
// regularization
size += ocp_nlp_reg_config_calculate_size();
// dynamics
size += N * sizeof(ocp_nlp_dynamics_config *);
for (ii = 0; ii < N; ii++) size += ocp_nlp_dynamics_config_calculate_size();
// cost
size += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (ii = 0; ii <= N; ii++) size += ocp_nlp_cost_config_calculate_size();
// constraints
size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (ii = 0; ii <= N; ii++) size += ocp_nlp_constraints_config_calculate_size();
return size;
}
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
int ii;
char *c_ptr = (char *) raw_memory;
ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
c_ptr += sizeof(ocp_nlp_config);
config->N = N;
// qp solver
config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr);
c_ptr += ocp_qp_xcond_solver_config_calculate_size();
// regularization
config->regularize = ocp_nlp_reg_config_assign(c_ptr);
c_ptr += ocp_nlp_reg_config_calculate_size();
// dynamics
config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii] = ocp_nlp_dynamics_config_assign(c_ptr);
c_ptr += ocp_nlp_dynamics_config_calculate_size();
}
// cost
config->cost = (ocp_nlp_cost_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
for (ii = 0; ii <= N; ii++)
{
config->cost[ii] = ocp_nlp_cost_config_assign(c_ptr);
c_ptr += ocp_nlp_cost_config_calculate_size();
}
// constraints
config->constraints = (ocp_nlp_constraints_config **) c_ptr;
c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii] = ocp_nlp_constraints_config_assign(c_ptr);
c_ptr += ocp_nlp_constraints_config_calculate_size();
}
return config;
}
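/*
 * Usage sketch (added illustration, not part of the original source): the
 * calculate_size/assign pair above follows the usual acados raw-memory
 * pattern. Query the required size, allocate one contiguous chunk, then map
 * the struct into it; error handling is omitted for brevity.
 */
#if 0
static ocp_nlp_config *example_config_create(int N)
{
    int size = ocp_nlp_config_calculate_size(N);
    void *raw = malloc(size);  // caller owns (and later frees) this memory
    return ocp_nlp_config_assign(N, raw);
}
#endif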
/************************************************
* dims
************************************************/
static int ocp_nlp_dims_calculate_size_self(int N)
{
int size = 0;
size += sizeof(ocp_nlp_dims);
// nlp sizes
size += 6 * (N + 1) * sizeof(int); // nv, nx, nu, ni, nz, ns
// dynamics
size += N * sizeof(void *);
// cost
size += (N + 1) * sizeof(void *);
// constraints
size += (N + 1) * sizeof(void *);
// regularization
size += ocp_nlp_reg_dims_calculate_size(N);
size += sizeof(ocp_nlp_reg_dims);
size += 8; // initial align
return size;
}
int ocp_nlp_dims_calculate_size(void *config_)
{
ocp_nlp_config *config = config_;
int N = config->N;
int ii;
int size = 0;
// self
size += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
size += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
// cost
for (ii = 0; ii <= N; ii++) size += config->cost[ii]->dims_calculate_size(config->cost[ii]);
// constraints
for (ii = 0; ii <= N; ii++)
size += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
// qp solver
size += config->qp_solver->dims_calculate_size(config->qp_solver, N);
return size;
}
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
int ii;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
c_ptr += sizeof(ocp_nlp_dims);
// nv
assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
// nx
assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
// nu
assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
// ni
assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
// nz
assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
// ns
assign_and_advance_int(N + 1, &dims->ns, &c_ptr);
// dynamics
dims->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
dims->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
dims->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// regularization
dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
c_ptr += ocp_nlp_reg_dims_calculate_size(N);
/* initialize qp_solver dimensions */
// dims->qp_solver->N = N;
// for (ii = 0; ii <= N; ii++)
// {
// TODO(dimitris): values below are needed for reformulation of QP when soft constraints
// are not supported. Make this a bit more transparent as it clashes with nbx/nbu above.
// dims->qp_solver->nsbx[ii] = 0;
// dims->qp_solver->nsbu[ii] = 0;
// dims->qp_solver->nsg[ii] = 0;
// }
// N
dims->N = N;
// initialize dimensions to zero by default
// nv
for(ii=0; ii<=N; ii++)
dims->nv[ii] = 0;
// nx
for(ii=0; ii<=N; ii++)
dims->nx[ii] = 0;
// nu
for(ii=0; ii<=N; ii++)
dims->nu[ii] = 0;
// ni
for(ii=0; ii<=N; ii++)
dims->ni[ii] = 0;
// nz
for(ii=0; ii<=N; ii++)
dims->nz[ii] = 0;
// ns
for(ii=0; ii<=N; ii++)
dims->ns[ii] = 0;
// TODO: initialize dims to zero by default also in the modules!
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);
return dims;
}
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
ocp_nlp_config *config = config_;
int N = config->N;
int ii;
char *c_ptr = (char *) raw_memory;
// self
ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
c_ptr += ocp_nlp_dims_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
dims->dynamics[ii] = config->dynamics[ii]->dims_assign(config->dynamics[ii], c_ptr);
c_ptr += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
dims->cost[ii] = config->cost[ii]->dims_assign(config->cost[ii], c_ptr);
c_ptr += config->cost[ii]->dims_calculate_size(config->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
dims->constraints[ii] =
config->constraints[ii]->dims_assign(config->constraints[ii], c_ptr);
c_ptr += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
}
// qp solver
dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);
// assert
assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);
return dims;
}
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field,
const void* value_array)
{
// to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int ii;
int N = config->N;
int *int_array = (int *) value_array;
/* set ocp_nlp dimension */
if (!strcmp(field, "nx"))
{
// opt var
for (ii = 0; ii <= N; ii++)
{
// set nx
dims->nx[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nx", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nx", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nx", &int_array[i]);
}
// regularization
for (ii = 0; ii <= N; ii++)
{
config->regularize->dims_set(config->regularize, dims->regularize, ii, "nx", &int_array[ii]);
}
}
else if (!strcmp(field, "nu"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set nu
dims->nu[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nu", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu", &int_array[i]);
}
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nu", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nu", &int_array[i]);
}
// regularization
for (ii = 0; ii <= N; ii++)
{
config->regularize->dims_set(config->regularize, dims->regularize, ii, "nu", &int_array[ii]);
}
}
else if (!strcmp(field, "nz"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set nz
dims->nz[ii] = int_array[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "nz", &int_array[i]);
}
// dynamics
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nz", &int_array[i]);
}
// constraints
for (int i = 0; i <= N; i++)
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
"nz", &int_array[i]);
}
}
else if (!strcmp(field, "ns"))
{
// nlp opt var
for (int ii = 0; ii <= N; ii++)
{
// set ns
dims->ns[ii] = int_array[ii];
// update nv
dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
}
// cost
for (int i = 0; i <= N; i++)
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], "ns", &int_array[i]);
}
// qp solver
for (int i = 0; i <= N; i++)
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ns",
&int_array[i]);
}
}
else
{
printf("error: dims type not available in module ocp_nlp: %s", field);
exit(1);
}
#if 0
/* set ocp_nlp submodule dimensions */
if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], field, &int_array[i]);
}
}
if (!strcmp(field, "nu"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nu1", &int_array[i+1]);
}
}
if (!strcmp(field, "nx"))
{
for (int i = 0; i < N; i++)
{
config->dynamics[i]->dims_set(config->dynamics[i],
dims->dynamics[i], "nx1", &int_array[i+1]);
}
}
for (int i = 0; i <= N; i++) // cost
{
config->cost[i]->dims_set(config->cost[i],
dims->cost[i], field, &int_array[i]);
}
for (int i = 0; i <= N; i++) // constraints
{
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, &int_array[i]);
}
if (strcmp(field, "nz")) // qp_solver does not contain nz
{
for (int i = 0; i <= N; i++) // qp_solver
{
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field,
&int_array[i]);
}
}
#endif
return;
}
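/*
 * Usage sketch (added illustration, hypothetical dimensions): each opt-var
 * dimension is passed as an int array with one entry per stage, i.e. of
 * length N+1. For a horizon N = 2 with 4 states everywhere and 1 input on
 * the non-terminal stages:
 */
#if 0
int nx[3] = {4, 4, 4};
int nu[3] = {1, 1, 0};  // no control acts on the terminal stage here
ocp_nlp_dims_set_opt_vars(config, dims, "nx", nx);
ocp_nlp_dims_set_opt_vars(config, dims, "nu", nu);
#endif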
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field,
const void* value_)
{
// to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
int i = stage;
// set in constraint module
config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
field, int_value);
// update ni in ocp_nlp dimensions
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ni", &dims->ni[i]);
// update qp_solver dims
if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, (char *) field, int_value);
}
else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
{
// update ng_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"ng_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "ng", &ng_qp_solver);
// regularization
config->regularize->dims_set(config->regularize, dims->regularize, i, "ng", &ng_qp_solver);
}
else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
{
// update nsg_qp_solver in qp_solver
int nsg_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i], "nsg_qp_solver", &nsg_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nsg", &nsg_qp_solver);
}
else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
{
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
}
else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
{
// update nge_qp_solver in qp_solver
int ng_qp_solver;
config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
"nge_qp_solver", &ng_qp_solver);
// qp solver
config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, "nge", &ng_qp_solver);
}
return;
}
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage,
const char *field, const void* value_)
{
// to set dimension ny (output)
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value_;
config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage,
const char *field, const void* value)
{
// mainly for gnsf dimensions
ocp_nlp_config *config = config_;
ocp_nlp_dims *dims = dims_;
int *int_value = (int *) value;
config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage], field, int_value);
}
/************************************************
* in
************************************************/
int ocp_nlp_in_calculate_size_self(int N)
{
int size = sizeof(ocp_nlp_in);
size += N * sizeof(double); // Ts
size += N * sizeof(void *); // dynamics
size += (N + 1) * sizeof(void *); // cost
size += (N + 1) * sizeof(void *); // constraints
return size;
}
int ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
int ii;
int N = dims->N;
int size = ocp_nlp_in_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
size +=
config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += config->constraints[ii]->model_calculate_size(config->constraints[ii],
dims->constraints[ii]);
}
size += 8; // initial align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
c_ptr += sizeof(ocp_nlp_in);
// Ts
in->Ts = (double *) c_ptr;
c_ptr += N * sizeof(double);
// dynamics
in->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
// cost
in->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
// constraints
in->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
return in;
}
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
int ii;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
// struct
ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
c_ptr += ocp_nlp_in_calculate_size_self(N);
// dynamics
for (ii = 0; ii < N; ii++)
{
in->dynamics[ii] =
config->dynamics[ii]->model_assign(config->dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr +=
config->dynamics[ii]->model_calculate_size(config->dynamics[ii], dims->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
in->cost[ii] = config->cost[ii]->model_assign(config->cost[ii], dims->cost[ii], c_ptr);
c_ptr += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
in->constraints[ii] = config->constraints[ii]->model_assign(config->constraints[ii],
dims->constraints[ii], c_ptr);
c_ptr += config->constraints[ii]->model_calculate_size(config->constraints[ii],
dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);
return in;
}
/************************************************
* out
************************************************/
int ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
int size = sizeof(ocp_nlp_out);
size += 4 * (N + 1) * sizeof(struct blasfeo_dvec); // ux, lam, t, z
size += 1 * N * sizeof(struct blasfeo_dvec); // pi
for (int ii = 0; ii < N; ii++)
{
size += 1 * blasfeo_memsize_dvec(nv[ii]); // ux
size += 1 * blasfeo_memsize_dvec(nz[ii]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // lam, t
size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // pi
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // ux
size += 1 * blasfeo_memsize_dvec(nz[N]); // z
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // lam, t
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
// loop index
int ii;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
c_ptr += sizeof(ocp_nlp_out);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// blasfeo_dvec_struct
// ux
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
// z
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
// pi
assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
// lam
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
// t
assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// blasfeo_dvec
// ux
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], out->ux + ii, &c_ptr);
}
// z
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nz[ii], out->z + ii, &c_ptr);
}
// pi
for (int ii = 0; ii < N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], out->pi + ii, &c_ptr);
}
// lam
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->lam + ii, &c_ptr);
}
// t
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->t + ii, &c_ptr);
}
// zero solution
for(ii=0; ii<N; ii++)
{
blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
blasfeo_dvecse(nx[ii+1], 0.0, out->pi+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
}
ii = N;
blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);
return out;
}
/************************************************
* options
************************************************/
int ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
int size = 0;
size += sizeof(ocp_nlp_opts);
size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
size += config->regularize->opts_calculate_size();
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
return size;
}
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
char *c_ptr = (char *) raw_memory;
ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
c_ptr += sizeof(ocp_nlp_opts);
opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);
opts->regularize = config->regularize->opts_assign(c_ptr);
c_ptr += config->regularize->opts_calculate_size();
// dynamics
opts->dynamics = (void **) c_ptr;
c_ptr += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
}
// cost
opts->cost = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
}
// constraints
opts->constraints = (void **) c_ptr;
c_ptr += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
opts->constraints[ii] =
constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
}
assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);
return opts;
}
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
ocp_nlp_reg_config *regularize = config->regularize;
int ii;
int N = dims->N;
opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
opts->num_threads = ACADOS_NUM_THREADS;
#endif
opts->step_length = 1.0;
// submodules opts
// qp solver
qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_opts *opts = opts_;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
return;
}
void ocp_nlp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name, i.e. substring in field before '_'
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
field+module_length+1, value);
}
// pass options to dynamics module
else // nlp opts
{
if (!strcmp(field, "reuse_workspace"))
{
int* reuse_workspace = (int *) value;
opts->reuse_workspace = *reuse_workspace;
}
else if (!strcmp(field, "num_threads"))
{
int* num_threads = (int *) value;
opts->num_threads = *num_threads;
}
else if (!strcmp(field, "step_length"))
{
double* step_length = (double *) value;
opts->step_length = *step_length;
}
else if (!strcmp(field, "exact_hess"))
{
int N = config->N;
// cost
for (ii=0; ii<=N; ii++)
config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value);
// dynamics
for (ii=0; ii<N; ii++)
config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value);
// constraints: TODO, disabled for now as it prevents convergence!
// for (ii=0; ii<=N; ii++)
// config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value);
}
else
{
printf("\nerror: ocp_nlp_opts_set: wrong field: %s\n", field);
exit(1);
}
}
return;
}
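/*
 * Usage sketch (added illustration; "qp_some_option" is a hypothetical field
 * name): options prefixed with "qp_" are forwarded to the QP solver with the
 * prefix stripped, everything else is treated as an NLP-level option.
 */
#if 0
double step = 0.5;
ocp_nlp_opts_set(config, opts, "step_length", &step);     // NLP-level option
int dummy = 0;
ocp_nlp_opts_set(config, opts, "qp_some_option", &dummy); // reaches the QP solver as "some_option"
#endif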
void ocp_nlp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
ocp_nlp_opts *opts = (ocp_nlp_opts *) opts_;
ocp_nlp_config *config = config_;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to dynamics module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "dynamics")) )
{
config->dynamics[stage]->opts_set( config->dynamics[stage], opts->dynamics[stage],
field+module_length+1, value );
}
// pass options to cost module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "cost")) )
{
config->cost[stage]->opts_set( config->cost[stage], opts->cost[stage],
field+module_length+1, value);
}
// pass options to constraint module
else if ( ptr_module!=NULL && (!strcmp(ptr_module, "constraints")) )
{
config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage],
(char *) field+module_length+1, value);
}
else
{
printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
exit(1);
}
return;
}
/************************************************
* memory
************************************************/
int ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
int size = sizeof(ocp_nlp_memory);
// qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// qp solver
size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);
// dynamics
size += N * sizeof(void *);
for (int ii = 0; ii < N; ii++)
{
size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
size += (N + 1) * sizeof(void *);
for (int ii = 0; ii <= N; ii++)
{
size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
size += (N+1)*sizeof(bool); // set_sim_guess
size += (N+1)*sizeof(struct blasfeo_dmat); // dzduxt
size += 6*(N+1)*sizeof(struct blasfeo_dvec); // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
size += 1*N*sizeof(struct blasfeo_dvec); // dyn_fun
for (int ii = 0; ii < N; ii++)
{
size += 1*blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[ii]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[ii]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[ii] + nx[ii]); // dyn_adj
size += 1*blasfeo_memsize_dvec(nx[ii + 1]); // dyn_fun
size += 1*blasfeo_memsize_dvec(2 * ni[ii]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[ii] + nz[ii]); // sim_guess
}
size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]); // dzduxt
size += 1*blasfeo_memsize_dvec(nz[N]); // z_alg
size += 2*blasfeo_memsize_dvec(nv[N]); // cost_grad ineq_adj
size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]); // dyn_adj
size += 1*blasfeo_memsize_dvec(2 * ni[N]); // ineq_fun
size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]); // sim_guess
size += 8; // initial align
size += 8; // middle align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
make_int_multiple_of(8, &size);
return size;
}
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nz = dims->nz;
int *nu = dims->nu;
int *ni = dims->ni;
char *c_ptr = (char *) raw_memory;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_memory);
// dynamics
mem->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
// cost
mem->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// constraints
mem->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
// middle align
align_char_to(8, &c_ptr);
// qp in
mem->qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// qp out
mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
// QP solver
mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// regularization
mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize,
opts->regularize, c_ptr);
c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize,
opts->regularize);
// dynamics
for (int ii = 0; ii < N; ii++)
{
mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr);
c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
mem->constraints[ii] = constraints[ii]->memory_assign(constraints[ii],
dims->constraints[ii], opts->constraints[ii], c_ptr);
c_ptr += constraints[ii]->memory_calculate_size( constraints[ii], dims->constraints[ii],
opts->constraints[ii]);
}
// set_sim_guess
assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);
for (int ii = 0; ii <= N; ++ii)
{
mem->set_sim_guess[ii] = false;
}
// blasfeo_struct align
align_char_to(8, &c_ptr);
// dzduxt
mem->dzduxt = (struct blasfeo_dmat *) c_ptr;
c_ptr += (N+1)*sizeof(struct blasfeo_dmat);
// z_alg
mem->z_alg = (struct blasfeo_dvec *) c_ptr;
c_ptr += (N+1)*sizeof(struct blasfeo_dvec);
// cost_grad
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
// ineq_fun
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
// ineq_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
// dyn_fun
assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
// dyn_adj
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
// sim_guess
assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// dzduxt
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, c_ptr);
c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);
}
// z_alg
for (int ii=0; ii<=N; ii++)
{
blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
c_ptr += blasfeo_memsize_dvec(nz[ii]);
}
// cost_grad
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->cost_grad + ii, &c_ptr);
}
// ineq_fun
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], mem->ineq_fun + ii, &c_ptr);
}
// ineq_adj
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->ineq_adj + ii, &c_ptr);
}
// dyn_fun
for (int ii = 0; ii < N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], mem->dyn_fun + ii, &c_ptr);
}
// dyn_adj
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nu[ii] + nx[ii], mem->dyn_adj + ii, &c_ptr);
}
// sim_guess
for (int ii = 0; ii <= N; ++ii)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii] + nz[ii], mem->sim_guess + ii, &c_ptr);
// set to 0;
blasfeo_dvecse(nx[ii] + nz[ii], 0.0, mem->sim_guess+ii, 0);
// printf("sim_guess ii %d: %p\n", ii, mem->sim_guess+ii);
}
// printf("created memory %p\n", mem);
return mem;
}
/************************************************
* workspace
************************************************/
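/*
 * Added note: with opts->reuse_workspace set and no OpenMP, the submodule
 * workspaces below are overlaid on the same buffer, which is sized to the
 * largest single requirement (the modules run strictly one after another).
 * With OpenMP, modules may run concurrently, so each one gets its own
 * disjoint slice and the sizes are summed instead.
 */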
int ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int ii;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
int size = 0;
int size_tmp = 0;
int tmp;
// nlp
size += sizeof(ocp_nlp_workspace);
// tmp_nlp_out
size += ocp_nlp_out_calculate_size(config, dims);
// weights_nlp_out
size += ocp_nlp_out_calculate_size(config, dims);
// array of pointers
// cost
size += (N+1)*sizeof(void *);
// dynamics
size += N*sizeof(void *);
// constraints
size += (N+1)*sizeof(void *);
// module workspace
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
#else
// qp solver
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (ii = 0; ii < N; ii++)
{
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (ii = 0; ii <= N; ii++)
{
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (ii = 0; ii <= N; ii++)
{
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
size += size_tmp;
#endif
}
else
{
// qp solver
size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (ii = 0; ii < N; ii++)
{
size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (ii = 0; ii <= N; ii++)
{
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (ii = 0; ii <= N; ii++)
{
size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
}
return size;
}
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
ocp_nlp_cost_config **cost = config->cost;
ocp_nlp_constraints_config **constraints = config->constraints;
int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
char *c_ptr = (char *) raw_memory;
ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
c_ptr += sizeof(ocp_nlp_workspace);
// tmp_nlp_out
work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// weights_nlp_out
work->weights_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
c_ptr += ocp_nlp_out_calculate_size(config, dims);
// array of pointers
//
work->dynamics = (void **) c_ptr;
c_ptr += N*sizeof(void *);
//
work->cost = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
//
work->constraints = (void **) c_ptr;
c_ptr += (N+1)*sizeof(void *);
if (opts->reuse_workspace)
{
#if defined(ACADOS_WITH_OPENMP)
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
#else
int size_tmp = 0;
int tmp;
// qp solver
work->qp_work = (void *) c_ptr;
tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
size_tmp = tmp > size_tmp ? tmp : size_tmp;
}
c_ptr += size_tmp;
#endif
}
else
{
// qp solver
work->qp_work = (void *) c_ptr;
c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver,
opts->qp_solver_opts);
// dynamics
for (int ii = 0; ii < N; ii++)
{
work->dynamics[ii] = c_ptr;
c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
}
// cost
for (int ii = 0; ii <= N; ii++)
{
work->cost[ii] = c_ptr;
c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
}
// constraints
for (int ii = 0; ii <= N; ii++)
{
work->constraints[ii] = c_ptr;
c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
}
}
assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return work;
}
/************************************************
* functions
************************************************/
void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int ii;
int N = dims->N;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (ii = 0; ii <= N; ii++)
{
// cost
config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], in->cost[ii],
opts->cost[ii], mem->cost[ii], work->cost[ii]);
// dynamics
if (ii < N)
config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii],
in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]);
// constraints
config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii],
in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);
}
return;
}
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem,
ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
/* stage-wise multiple shooting lagrangian evaluation */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// init Hessian to 0
blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);
// dynamics
if (i < N)
config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i],
in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
// cost
config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i],
opts->cost[i], mem->cost[i], work->cost[i]);
// constraints
config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]);
}
/* collect stage-wise evaluations */
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i <= N; i++)
{
// nlp mem: cost_grad
struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);
// nlp mem: dyn_fun
if (i < N)
{
struct blasfeo_dvec *dyn_fun
= config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
}
// nlp mem: dyn_adj
if (i < N)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
}
else
{
blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
}
if (i > 0)
{
struct blasfeo_dvec *dyn_adj
= config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i],
mem->dyn_adj+i, nu[i]);
}
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);
// nlp mem: ineq_adj
struct blasfeo_dvec *ineq_adj =
config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
}
for (i = 0; i <= N; i++)
{
// TODO(rien) where should the update happen??? move to qp update ???
// TODO(all): fix and move where appropriate
// if (i<N)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme != NULL && opts->scheme->type != exact)
// {
// for (int_t j = 0; j < nx; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
// for (int_t j = 0; j < nu; j++)
// BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
// }
// }
}
return;
}
// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config,
ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// g
blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);
// b
if (i < N)
blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);
// d
blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
}
return;
}
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int *ni = dims->ni;
// constraints
config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0],
in->constraints[0], opts->constraints[0], mem->constraints[0], work->constraints[0]);
// nlp mem: ineq_fun
struct blasfeo_dvec *ineq_fun =
config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);
// d
blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);
return;
}
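/*
 * Added note on the function below: as implemented, the merit function is
 * the weighted l1-type sum
 *   phi = sum_i cost_i
 *       + sum_i sum_j |w_pi[i][j]|  * |dyn_fun[i][j]|
 *       + sum_i sum_j |w_lam[i][j]| * max(0, ineq_fun[i][j])
 * with the weights w_pi, w_lam taken from work->weights_nlp_out.
 */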
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_opts *opts,
ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i, j;
int N = dims->N;
int *nx = dims->nx;
int *ni = dims->ni;
double merit_fun = 0.0;
// compute fun value
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<=N; i++)
{
// cost
config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<N; i++)
{
// dynamics
config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
}
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i=0; i<=N; i++)
{
// constr
config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i],
in->constraints[i], opts->constraints[i],
mem->constraints[i], work->constraints[i]);
}
double *tmp_fun;
double tmp;
struct blasfeo_dvec *tmp_fun_vec;
double cost_fun = 0.0;
for(i=0; i<=N; i++)
{
tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
cost_fun += *tmp_fun;
}
double dyn_fun = 0.0;
for(i=0; i<N; i++)
{
// printf("\ni %d\n", i);
tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
// blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(nx[i+1], work->weights_nlp_out->pi+i, 0);
for(j=0; j<nx[i+1]; j++)
{
// printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
dyn_fun += fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
}
}
double constr_fun = 0.0;
for(i=0; i<=N; i++)
{
// printf("\ni %d\n", i);
tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
// blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
// blasfeo_print_exp_tran_dvec(2*ni[i], work->weights_nlp_out->lam+i, 0);
for(j=0; j<2*ni[i]; j++)
{
tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
tmp = tmp>0.0 ? tmp : 0.0;
// printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
constr_fun += fabs(BLASFEO_DVECEL(work->weights_nlp_out->lam+i, j)) * tmp;
}
}
merit_fun = cost_fun + dyn_fun + constr_fun;
printf("\n%e %e %e %e\n", merit_fun, cost_fun, dyn_fun, constr_fun);
return merit_fun;
}
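/*
 * Added note on the function below: the update is a fixed-step move
 *   ux <- ux + alpha * dux_qp
 * in the primal variables and a convex combination
 *   pi <- (1 - alpha) * pi + alpha * pi_qp   (likewise for lam and t)
 * in the dual variables and slacks, i.e. "step prim var, abs dual var".
 */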
void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
int *nz = dims->nz;
// ocp_nlp_config *config = (ocp_nlp_config *) config_;
// (fixed) step length
double alpha = opts->step_length;
#if 0 // XXX test piece of code
int j;
double tmp0, tmp1;
// current point
for (i = 0; i <= N; i++)
blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);
// linear update of algebraic variables using state and input sensitivity
// if (i < N)
// {
// blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
// }
// initialize weights
if(mem->sqp_iter[0]==0)
{
for (i = 0; i < N; i++)
blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->weights_nlp_out->pi+i, 0);
for (i = 0; i <= N; i++)
blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weights_nlp_out->lam+i, 0);
}
// update weigths
for (i = 0; i < N; i++)
{
for(j=0; j<nx[i+1]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j));
tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->pi+i, j)));
BLASFEO_DVECEL(work->weights_nlp_out->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
for (i = 0; i <= N; i++)
{
for(j=0; j<2*ni[i]; j++)
{
tmp0 = fabs(BLASFEO_DVECEL(work->weights_nlp_out->lam+i, j));
tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->lam+i, j)));
BLASFEO_DVECEL(work->weights_nlp_out->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
}
}
printf("\n\nmerit fun value\n");
double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
double alpha_min = 0.2;
for (j=0; j<10 && alpha>alpha_min; j++)
{
for (i = 0; i <= N; i++)
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);
printf("\n%d tmp merit fun value\n", j);
double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
if(merit_fun1 < merit_fun0)
{
break;
}
else
{
alpha *= 0.7;
}
}
printf("\nalpha %f\n", alpha);
#endif
#if defined(ACADOS_WITH_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i <= N; i++)
{
// (full) step in primal variables
blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0);
// update dual variables
if (i < N)
{
blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
}
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0);
// update slack values
blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);
// linear update of algebraic variables using state and input sensitivity
if (i < N)
{
blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
}
}
return;
}
/************************************************
* residuals
************************************************/
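/*
 * Added note: the four residuals assembled here are the standard KKT
 * quantities: res_g (stationarity of the Lagrangian gradient), res_b
 * (dynamics/equality constraints), res_d (inequality constraints) and
 * res_m (complementarity); their infinity norms serve as convergence
 * measures.
 */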
int ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
int size = sizeof(ocp_nlp_res);
size += 3 * (N + 1) * sizeof(struct blasfeo_dvec); // res_g res_d res_m
size += 1 * N * sizeof(struct blasfeo_dvec); // res_b
for (int ii = 0; ii < N; ii++)
{
size += 1 * blasfeo_memsize_dvec(nv[ii]); // res_g
size += 1 * blasfeo_memsize_dvec(nx[ii + 1]); // res_b
size += 2 * blasfeo_memsize_dvec(2 * ni[ii]); // res_d res_m
}
size += 1 * blasfeo_memsize_dvec(nv[N]); // res_g
size += 2 * blasfeo_memsize_dvec(2 * ni[N]); // res_d res_m
size += 8; // initial align
size += 8; // blasfeo_struct align
size += 64; // blasfeo_mem align
// make_int_multiple_of(64, &size);
return size;
}
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
char *c_ptr = (char *) raw_memory;
// extract sizes
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// initial align
align_char_to(8, &c_ptr);
// struct
ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
c_ptr += sizeof(ocp_nlp_res);
// blasfeo_struct align
align_char_to(8, &c_ptr);
// res_g
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_g, &c_ptr);
// res_b
assign_and_advance_blasfeo_dvec_structs(N, &res->res_b, &c_ptr);
// res_d
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_d, &c_ptr);
// res_m
assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_m, &c_ptr);
// blasfeo_mem align
align_char_to(64, &c_ptr);
// res_g
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nv[ii], res->res_g + ii, &c_ptr);
}
// res_b
for (int ii = 0; ii < N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], res->res_b + ii, &c_ptr);
}
// res_d
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_d + ii, &c_ptr);
}
// res_m
for (int ii = 0; ii <= N; ii++)
{
assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_m + ii, &c_ptr);
}
res->memsize = ocp_nlp_res_calculate_size(dims);
return res;
}
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res,
ocp_nlp_memory *mem)
{
// extract dims
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
int *nu = dims->nu;
int *ni = dims->ni;
double tmp_res;
// res_g
res->inf_norm_res_g = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_daxpy(nv[ii], -1.0, mem->ineq_adj + ii, 0, mem->cost_grad + ii, 0, res->res_g + ii,
0);
blasfeo_daxpy(nu[ii] + nx[ii], -1.0, mem->dyn_adj + ii, 0, res->res_g + ii, 0,
res->res_g + ii, 0);
blasfeo_dvecnrm_inf(nv[ii], res->res_g + ii, 0, &tmp_res);
res->inf_norm_res_g = tmp_res > res->inf_norm_res_g ? tmp_res : res->inf_norm_res_g;
}
// res_b
res->inf_norm_res_b = 0.0;
for (int ii = 0; ii < N; ii++)
{
blasfeo_dveccp(nx[ii + 1], mem->dyn_fun + ii, 0, res->res_b + ii, 0);
blasfeo_dvecnrm_inf(nx[ii + 1], res->res_b + ii, 0, &tmp_res);
res->inf_norm_res_b = tmp_res > res->inf_norm_res_b ? tmp_res : res->inf_norm_res_b;
}
// res_d
res->inf_norm_res_d = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_daxpy(2 * ni[ii], 1.0, out->t + ii, 0, mem->ineq_fun + ii, 0, res->res_d + ii, 0);
blasfeo_dvecnrm_inf(2 * ni[ii], res->res_d + ii, 0, &tmp_res);
res->inf_norm_res_d = tmp_res > res->inf_norm_res_d ? tmp_res : res->inf_norm_res_d;
}
// res_m
res->inf_norm_res_m = 0.0;
for (int ii = 0; ii <= N; ii++)
{
blasfeo_dvecmul(2 * ni[ii], out->lam + ii, 0, out->t + ii, 0, res->res_m + ii, 0);
blasfeo_dvecnrm_inf(2 * ni[ii], res->res_m + ii, 0, &tmp_res);
res->inf_norm_res_m = tmp_res > res->inf_norm_res_m ? tmp_res : res->inf_norm_res_m;
}
return;
}
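/*
 * Sketch of a typical termination test on the computed residuals (tol is a
 * user-chosen tolerance, not defined in this file):
 *
 *     ocp_nlp_res_compute(dims, in, out, res, mem);
 *     int converged = res->inf_norm_res_g < tol && res->inf_norm_res_b < tol
 *                  && res->inf_norm_res_d < tol && res->inf_norm_res_m < tol;
 */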
|
main.c | /**
* program: dadafilterbank
*
* Purpose: connect to a ring buffer and create Sigproc output per TAB on request
*
* A ringbuffer page is interpreted as an array of Stokes I:
* [NTABS, NCHANNELS, padded_size] = [12, 1536, > 25000]
*
* Written for the AA-Alert project, ASTRON
*
* Author: Jisk Attema, Netherlands eScience Center
* License: Apache v2.0
*/
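/*
 * Layout sketch (illustrative): a Stokes I sample is addressed in a page as
 *   page[(tab * nchannels + channel) * padded_size + time]
 * and the main loop below transposes it, flipping the frequency axis, into
 *   buffer[tab * ntimes * nchannels + time * nchannels + (nchannels - channel - 1)]
 */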
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <getopt.h>
#include <errno.h>
#include <signal.h>
#include "dada_hdu.h"
#include "ascii_header.h"
#include "filterbank.h"
#include "config.h"
#define MAXTABS 12
int output[MAXTABS];
FILE *runlog = NULL;
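// Write each message to both stdout and the run log, flushing both so output
// survives a crash; runlog must be opened before the first LOG() call.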
#define LOG(...) {fprintf(stdout, __VA_ARGS__); fprintf(runlog, __VA_ARGS__); fflush(stdout); fflush(runlog);}
// Hardcoded parameters
const unsigned int nchannels = 1536; // Must be divisible by 6 for the current transpose/inverse implementation
const unsigned int nbit = 8;
// Parameters read from ringbuffer header block (with default to lowest data rate)
int science_case = 3;
int science_mode = 2;
int padded_size = 12500;
double min_frequency;
double bandwidth;
double ra;
double dec;
char source_name[256];
double az_start;
double za_start;
double mjd_start;
// Derived parameters (with default to lowest data rate)
double tsamp = 1.024 / 12500;
int ntimes = 12500;
int ntabs = 1;
/**
* Open a connection to the ringbuffer
*
* @param {char *} key String containing the shared memory key as hexadecimal number
* @returns {hdu *} A connected HDU
*/
dada_hdu_t *init_ringbuffer(char *key) {
uint64_t nbufs;
int header_incomplete = 0;
multilog_t* multilog = NULL; // TODO: See if this is used in any way by dada
// create hdu
dada_hdu_t *hdu = dada_hdu_create (multilog);
// init key
key_t shmkey;
sscanf(key, "%x", &shmkey);
dada_hdu_set_key(hdu, shmkey);
LOG("dadafilterbank SHMKEY: %s\n", key);
// connect
if (dada_hdu_connect (hdu) < 0) {
LOG("ERROR in dada_hdu_connect\n");
exit(EXIT_FAILURE);
}
// Make data buffers readable
if (dada_hdu_lock_read(hdu) < 0) {
LOG("ERROR in dada_hdu_lock_read\n");
exit(EXIT_FAILURE);
}
// read the header block from the ringbuffer
char *header;
uint64_t bufsz;
header = ipcbuf_get_next_read (hdu->header_block, &bufsz);
if (! header || ! bufsz) {
LOG("ERROR. Get next header block error\n");
exit(EXIT_FAILURE);
}
// parse header
if(ascii_header_get(header, "MIN_FREQUENCY", "%lf", &min_frequency) == -1) {
LOG("ERROR. MIN_FREQUENCY not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "BW", "%lf", &bandwidth) == -1) {
LOG("ERROR. BW not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "RA", "%lf", &ra) == -1) {
LOG("ERROR. RA not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "DEC", "%lf", &dec) == -1) {
LOG("ERROR. DEC not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "SOURCE", "%s", source_name) == -1) {
LOG("ERROR. SOURCE not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "AZ_START", "%lf", &az_start) == -1) {
LOG("ERROR. AZ_START not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "ZA_START", "%lf", &za_start) == -1) {
LOG("ERROR. ZA_START not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "MJD_START", "%lf", &mjd_start) == -1) {
LOG("ERROR. MJD_START not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "SCIENCE_CASE", "%i", &science_case) == -1) {
LOG("ERROR. SCIENCE_CASE not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "SCIENCE_MODE", "%i", &science_mode) == -1) {
LOG("ERROR. SCIENCE_MODE not set in dada buffer\n");
header_incomplete = 1;
}
if(ascii_header_get(header, "PADDED_SIZE", "%i", &padded_size) == -1) {
LOG("ERROR. PADDED_SIZE not set in dada buffer\n");
header_incomplete = 1;
}
// tell the ringbuffer the header has been read
if (ipcbuf_mark_cleared(hdu->header_block) < 0) {
LOG("ERROR. Cannot mark the header as cleared\n");
exit(EXIT_FAILURE);
}
LOG("psrdada HEADER:\n%s\n", header);
if (header_incomplete) {
exit(EXIT_FAILURE);
}
return hdu;
}
/**
* Print commandline options
*/
void printOptions() {
printf("usage: dadafilterbank -k <hexadecimal key> -l <logfile> -n <filename prefix for dumps>\n");
printf("e.g. dadafilterbank -k dada -l log.txt -n myobs\n");
return;
}
/**
* Parse commandline
*/
void parseOptions(int argc, char *argv[], char **key, char **prefix, char **logfile) {
int c;
int setk=0, setl=0, setn=0;
while((c=getopt(argc,argv,"hk:l:n:"))!=-1) {
switch(c) {
// -k <hexadecimal_key>
case('k'):
*key = strdup(optarg);
setk=1;
break;
// -l log file
case('l'):
*logfile = strdup(optarg);
setl=1;
break;
// -n <filename prefix>
case('n'):
setn=1;
*prefix = strdup(optarg);
break;
// -h
case('h'):
printOptions();
exit(EXIT_SUCCESS);
break;
default:
fprintf(stderr, "Unknown option '%c'\n", c);
exit(EXIT_FAILURE);
break;
}
}
// All arguments are required
if (!setk || !setl || !setn) {
if (!setk) fprintf(stderr, "Error: DADA key not set\n");
if (!setl) fprintf(stderr, "Error: Log file not set\n");
if (!setn) fprintf(stderr, "Error: Filename prefix not set\n");
exit(EXIT_FAILURE);
}
}
void open_files(char *prefix, int ntabs) {
int tab;
for (tab=0; tab<ntabs; tab++) {
char fname[256];
if (ntabs == 1) {
snprintf(fname, 256, "%s.fil", prefix);
}
else {
snprintf(fname, 256, "%s_%02i.fil", prefix, tab);
}
// open filterbank file
output[tab] = filterbank_create(
fname, // filename
10, // int telescope_id,
15, // int machine_id,
source_name, // char *source_name,
az_start, // double az_start,
za_start, // double za_start,
ra, // double src_raj,
dec, // double src_dej,
mjd_start, // double tstart
tsamp, // double tsamp,
nbit, // int nbits,
min_frequency + bandwidth - (bandwidth / nchannels), // double fch1,
-1 * bandwidth / nchannels, // double foff,
nchannels, // int nchans,
ntabs, // int nbeams,
tab, // int ibeam
1 // int nifs
);
}
}
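/*
 * Worked example (hypothetical numbers, for illustration only): with
 * min_frequency = 1220.0 MHz, bandwidth = 300.0 MHz and nchannels = 1536 the
 * channel width is 300 / 1536 ~ 0.1953 MHz, giving
 *   fch1 = 1220 + 300 - 0.1953 ~ 1519.8 MHz   (highest channel first)
 *   foff = -0.1953 MHz                        (descending frequency order)
 * consistent with the frequency flip applied when filling the output buffer.
 */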
void close_files() {
int tab;
for (tab=0; tab<ntabs; tab++) {
filterbank_close(output[tab]);
}
}
/**
* Catch SIGINT then sync and close files before exiting
*/
void sigint_handler (int sig) {
LOG("SIGINT received, aborting\n");
int i;
for (i=0; i<ntabs; i++) {
if (output[i]) {
fsync(output[i]);
filterbank_close(output[i]);
}
}
exit(EXIT_FAILURE);
}
int main (int argc, char *argv[]) {
char *key;
char *logfile;
char *file_prefix;
// parse commandline
parseOptions(argc, argv, &key, &file_prefix, &logfile);
// set up logging
if (logfile) {
runlog = fopen(logfile, "w");
if (! runlog) {
// cannot use LOG() here: runlog is still NULL
fprintf(stderr, "ERROR opening logfile: %s\n", logfile);
exit(EXIT_FAILURE);
}
LOG("Logging to logfile: %s\n", logfile);
free (logfile);
}
// connect to ring buffer
dada_hdu_t *ringbuffer = init_ringbuffer(key);
ipcbuf_t *data_block = (ipcbuf_t *) ringbuffer->data_block;
ipcio_t *ipc = ringbuffer->data_block;
if (science_case == 3) {
// NTIMES (12500) per 1.024 seconds -> 0.00008192 [s]
ntimes = 12500;
tsamp = 1.024 / 12500;
ntabs = 9;
} else if (science_case == 4) {
// NTIMES (12500) per 1.024 seconds -> 0.00008192 [s]
ntimes = 12500;
tsamp = 1.024 / 12500;
ntabs = 12;
} else {
LOG("Error: Illegal science case '%i'\n", science_case);
exit(EXIT_FAILURE);
}
LOG("dadafilterbank version: " VERSION "\n");
LOG("Science case = %i\n", science_case);
LOG("Filename prefix = %s\n", file_prefix);
if (science_mode == 0) {
// I + TAB
LOG("Science mode: 0 [I + TAB]\n");
} else if (science_mode == 2) {
// I + IAB
// Overwrite NTABS to be one
ntabs = 1;
LOG("Science mode: 2 [I + IAB]\n");
} else if (science_mode == 1 || science_mode == 3) {
LOG("Error: modes 1 [IQUV + TAB] / 3 [IQUV + IAB] not supported\n");
exit(EXIT_FAILURE);
} else {
LOG("Error: Illegal science mode '%i'\n", science_mode);
exit(EXIT_FAILURE);
}
// create filterbank files, and close files on C-c
open_files(file_prefix, ntabs);
signal(SIGINT, sigint_handler);
// for interaction with ringbuffer
uint64_t bufsz = ipc->curbufsz;
char *page = NULL;
// for processing a page
int tab;
char *buffer = malloc(ntabs * ntimes * nchannels * sizeof(char));
if (! buffer) { LOG("ERROR. Cannot allocate transpose buffer\n"); exit(EXIT_FAILURE); }
int page_count = 0;
int quit = 0;
while(!quit && !ipcbuf_eod(data_block)) {
page = ipcbuf_get_next_read(data_block, &bufsz);
if (! page) {
quit = 1;
} else {
// page [NTABS, nchannels, time(padded_size)]
// file [time, nchannels]
for (tab = 0; tab < ntabs; tab++) {
int channel;
#pragma omp parallel for
for (channel = 0; channel < nchannels; channel+=6) {
const char *channelA = &page[(tab*nchannels + channel + 0)*padded_size];
const char *channelB = &page[(tab*nchannels + channel + 1)*padded_size];
const char *channelC = &page[(tab*nchannels + channel + 2)*padded_size];
const char *channelD = &page[(tab*nchannels + channel + 3)*padded_size];
const char *channelE = &page[(tab*nchannels + channel + 4)*padded_size];
const char *channelF = &page[(tab*nchannels + channel + 5)*padded_size];
int time;
for (time = 0; time < ntimes; time++) {
// reverse freq order to comply with header
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+0)-1] = channelA[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+1)-1] = channelB[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+2)-1] = channelC[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+3)-1] = channelD[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+4)-1] = channelE[time];
buffer[tab*ntimes*nchannels + time*nchannels+nchannels-(channel+5)-1] = channelF[time];
}
}
ssize_t nwritten = write(output[tab], &buffer[tab*ntimes*nchannels], sizeof(char) * ntimes * nchannels);
if (nwritten != (ssize_t) (ntimes * nchannels)) LOG("ERROR. Short write for TAB %i\n", tab);
}
ipcbuf_mark_cleared((ipcbuf_t *) ipc);
page_count++;
}
}
if (ipcbuf_eod(data_block)) {
LOG("End of data received\n");
}
dada_hdu_unlock_read(ringbuffer);
dada_hdu_disconnect(ringbuffer);
free(buffer);
LOG("Read %i pages\n", page_count);
return EXIT_SUCCESS;
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
// interleave
// src = inch-outch
// dst = 4b-4a-inch/4a-outch/4b
#if __aarch64__
kernel_tm_pack4.create(2 * 1, inch / 4, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
kernel_tm_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);
#endif
int q = 0;
#if __aarch64__
for (; q + 7 < outch; q += 8)
{
const float* k0 = (const float*)kernel + (q + 0) * inch;
const float* k1 = (const float*)kernel + (q + 1) * inch;
const float* k2 = (const float*)kernel + (q + 2) * inch;
const float* k3 = (const float*)kernel + (q + 3) * inch;
const float* k4 = (const float*)kernel + (q + 4) * inch;
const float* k5 = (const float*)kernel + (q + 5) * inch;
const float* k6 = (const float*)kernel + (q + 6) * inch;
const float* k7 = (const float*)kernel + (q + 7) * inch;
float* g0 = kernel_tm_pack4.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
g0[0] = k0[0];
g0[1] = k1[0];
g0[2] = k2[0];
g0[3] = k3[0];
g0[4] = k4[0];
g0[5] = k5[0];
g0[6] = k6[0];
g0[7] = k7[0];
g0[8] = k0[1];
g0[9] = k1[1];
g0[10] = k2[1];
g0[11] = k3[1];
g0[12] = k4[1];
g0[13] = k5[1];
g0[14] = k6[1];
g0[15] = k7[1];
g0[16] = k0[2];
g0[17] = k1[2];
g0[18] = k2[2];
g0[19] = k3[2];
g0[20] = k4[2];
g0[21] = k5[2];
g0[22] = k6[2];
g0[23] = k7[2];
g0[24] = k0[3];
g0[25] = k1[3];
g0[26] = k2[3];
g0[27] = k3[3];
g0[28] = k4[3];
g0[29] = k5[3];
g0[30] = k6[3];
g0[31] = k7[3];
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
k4 += 4;
k5 += 4;
k6 += 4;
k7 += 4;
g0 += 32;
}
}
#endif // __aarch64__
for (; q + 3 < outch; q += 4)
{
const float* k0 = (const float*)kernel + (q + 0) * inch;
const float* k1 = (const float*)kernel + (q + 1) * inch;
const float* k2 = (const float*)kernel + (q + 2) * inch;
const float* k3 = (const float*)kernel + (q + 3) * inch;
#if __aarch64__
float* g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
float* g0 = kernel_tm_pack4.channel(q / 4);
#endif
for (int p = 0; p + 3 < inch; p += 4)
{
g0[0] = k0[0];
g0[1] = k1[0];
g0[2] = k2[0];
g0[3] = k3[0];
g0[4] = k0[1];
g0[5] = k1[1];
g0[6] = k2[1];
g0[7] = k3[1];
g0[8] = k0[2];
g0[9] = k1[2];
g0[10] = k2[2];
g0[11] = k3[2];
g0[12] = k0[3];
g0[13] = k1[3];
g0[14] = k2[3];
g0[15] = k3[3];
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
g0 += 16;
}
}
}
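// Scalar reference of the 4-outch packing above (a sketch for clarity, not
// used by the library): within each 16-float group, kernel element
// (q + b, p + a) of the inch-x-outch matrix lands at offset 4*a + b:
//
//     for (int p = 0; p + 3 < inch; p += 4)
//         for (int a = 0; a < 4; a++)     // input channel within the pack
//             for (int b = 0; b < 4; b++) // output channel within the pack
//                 g0[4 * a + b] = kernel[(q + b) * inch + (p + a)];
//
// i.e. four consecutive floats hold one input channel across four output
// channels, matching the broadcast-multiply pattern of the sgemm below.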
static void conv1x1s1_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
#if __aarch64__
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
#else
Mat tmp(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
float* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v1.16b \n"
"mov v19.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%10] \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n" // r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif
for (; i + 7 < size; i += 8)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"vmov q12, q0 \n"
"vmov q13, q0 \n"
"vmov q14, q0 \n"
"vmov q15, q0 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < size; i += 2)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < size; i++)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v16.4s}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"vld1.f32 {d16-d17}, [%8] \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
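// Strategy note: the stride-2 variant below does not need its own sgemm. It
// first "shrinks" the input by keeping every second pack4 element per row and
// skipping every second row, then reuses the stride-1 kernel above.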
static void conv1x1s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float32x4_t _v = vld1q_f32(r0);
vst1q_f32(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
c-omp.c | /* This file contains routines to construct GNU OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "c-common.h"
#include "gimple.h" /* For create_tmp_var_raw. */
#include "langhooks.h"
/* Complete a #pragma omp master construct. STMT is the structured-block
that follows the pragma. LOC is the location of the #pragma. */
tree
c_finish_omp_master (location_t loc, tree stmt)
{
tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
SET_EXPR_LOCATION (t, loc);
return t;
}
/* Complete a #pragma omp critical construct. STMT is the structured-block
that follows the pragma, NAME is the identifier in the pragma, or null
if it was omitted. LOC is the location of the #pragma. */
tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
tree stmt = make_node (OMP_CRITICAL);
TREE_TYPE (stmt) = void_type_node;
OMP_CRITICAL_BODY (stmt) = body;
OMP_CRITICAL_NAME (stmt) = name;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Complete a #pragma omp ordered construct. STMT is the structured-block
that follows the pragma. LOC is the location of the #pragma. */
tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
tree t = build1 (OMP_ORDERED, void_type_node, stmt);
SET_EXPR_LOCATION (t, loc);
return add_stmt (t);
}
/* Complete a #pragma omp barrier construct. LOC is the location of
the #pragma. */
void
c_finish_omp_barrier (location_t loc)
{
tree x;
x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
x = build_call_expr_loc (loc, x, 0);
add_stmt (x);
}
/* Complete a #pragma omp taskwait construct. LOC is the location of the
pragma. */
void
c_finish_omp_taskwait (location_t loc)
{
tree x;
x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
x = build_call_expr_loc (loc, x, 0);
add_stmt (x);
}
/* Complete a #pragma omp taskyield construct. LOC is the location of the
pragma. */
void
c_finish_omp_taskyield (location_t loc)
{
tree x;
x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
x = build_call_expr_loc (loc, x, 0);
add_stmt (x);
}
/* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
the expression to be implemented atomically is LHS opcode= RHS.
For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
opcode= RHS with the new or old content of LHS returned.
LOC is the location of the atomic statement. The value returned
is either error_mark_node (if the construct was erroneous) or an
OMP_ATOMIC* node which should be added to the current statement
tree with add_stmt. */
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
enum tree_code opcode, tree lhs, tree rhs,
tree v, tree lhs1, tree rhs1)
{
tree x, type, addr;
if (lhs == error_mark_node || rhs == error_mark_node
|| v == error_mark_node || lhs1 == error_mark_node
|| rhs1 == error_mark_node)
return error_mark_node;
/* ??? According to one reading of the OpenMP spec, complex types are
supported, but there are no atomic stores for any architecture.
But at least icc 9.0 doesn't support complex types here either.
And let's not even talk about vector types... */
type = TREE_TYPE (lhs);
if (!INTEGRAL_TYPE_P (type)
&& !POINTER_TYPE_P (type)
&& !SCALAR_FLOAT_TYPE_P (type))
{
error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
return error_mark_node;
}
/* ??? Validate that rhs does not overlap lhs. */
/* Take and save the address of the lhs. From then on we'll reference it
via indirection. */
addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
if (addr == error_mark_node)
return error_mark_node;
addr = save_expr (addr);
if (TREE_CODE (addr) != SAVE_EXPR
&& (TREE_CODE (addr) != ADDR_EXPR
|| TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
{
/* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
it even after unsharing function body. */
tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
DECL_CONTEXT (var) = current_function_decl;
addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
}
lhs = build_indirect_ref (loc, addr, RO_NULL);
if (code == OMP_ATOMIC_READ)
{
x = build1 (OMP_ATOMIC_READ, type, addr);
SET_EXPR_LOCATION (x, loc);
return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
loc, x, NULL_TREE);
}
/* There are lots of warnings, errors, and conversions that need to happen
in the course of interpreting a statement. Use the normal mechanisms
to do this, and then take it apart again. */
x = build_modify_expr (input_location, lhs, NULL_TREE, opcode,
input_location, rhs, NULL_TREE);
if (x == error_mark_node)
return error_mark_node;
gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
rhs = TREE_OPERAND (x, 1);
/* Punt the actual generation of atomic operations to common code. */
if (code == OMP_ATOMIC)
type = void_type_node;
x = build2 (code, type, addr, rhs);
SET_EXPR_LOCATION (x, loc);
/* Generally it is hard to prove rhs1 and lhs are the same memory
location, just diagnose different variables. */
if (rhs1
&& TREE_CODE (rhs1) == VAR_DECL
&& TREE_CODE (lhs) == VAR_DECL
&& rhs1 != lhs)
{
if (code == OMP_ATOMIC)
error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
else
error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
return error_mark_node;
}
if (code != OMP_ATOMIC)
{
/* Generally it is hard to prove lhs1 and lhs are the same memory
location, just diagnose different variables. */
if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
{
if (lhs1 != lhs)
{
error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
return error_mark_node;
}
}
x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
loc, x, NULL_TREE);
if (rhs1 && rhs1 != lhs)
{
tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
if (rhs1addr == error_mark_node)
return error_mark_node;
x = omit_one_operand_loc (loc, type, x, rhs1addr);
}
if (lhs1 && lhs1 != lhs)
{
tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
if (lhs1addr == error_mark_node)
return error_mark_node;
if (code == OMP_ATOMIC_CAPTURE_OLD)
x = omit_one_operand_loc (loc, type, x, lhs1addr);
else
{
x = save_expr (x);
x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
}
}
}
else if (rhs1 && rhs1 != lhs)
{
tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
if (rhs1addr == error_mark_node)
return error_mark_node;
x = omit_one_operand_loc (loc, type, x, rhs1addr);
}
return x;
}
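/* Illustrative source forms for the codes handled above (OpenMP 3.1):

     #pragma omp atomic update
     x += expr;              // code == OMP_ATOMIC
     #pragma omp atomic read
     v = x;                  // code == OMP_ATOMIC_READ
     #pragma omp atomic capture
     v = x += expr;          // code == OMP_ATOMIC_CAPTURE_NEW
     #pragma omp atomic capture
     { v = x; x += expr; }   // code == OMP_ATOMIC_CAPTURE_OLD

   In each case LHS is x, RHS is expr and V receives the captured value.  */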
/* Complete a #pragma omp flush construct. We don't do anything with
the variable list that the syntax allows. LOC is the location of
the #pragma. */
void
c_finish_omp_flush (location_t loc)
{
tree x;
x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
x = build_call_expr_loc (loc, x, 0);
add_stmt (x);
}
/* Check and canonicalize #pragma omp for increment expression.
Helper function for c_finish_omp_for. */
static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
tree t;
if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
|| TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
return error_mark_node;
if (exp == decl)
return build_int_cst (TREE_TYPE (exp), 0);
switch (TREE_CODE (exp))
{
CASE_CONVERT:
t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
if (t != error_mark_node)
return fold_convert_loc (loc, TREE_TYPE (exp), t);
break;
case MINUS_EXPR:
t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
if (t != error_mark_node)
return fold_build2_loc (loc, MINUS_EXPR,
TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
break;
case PLUS_EXPR:
t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
if (t != error_mark_node)
return fold_build2_loc (loc, PLUS_EXPR,
TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
if (t != error_mark_node)
return fold_build2_loc (loc, PLUS_EXPR,
TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
break;
case COMPOUND_EXPR:
{
/* cp_build_modify_expr forces preevaluation of the RHS to make
sure that it is evaluated before the lvalue-rvalue conversion
is applied to the LHS. Reconstruct the original expression. */
tree op0 = TREE_OPERAND (exp, 0);
if (TREE_CODE (op0) == TARGET_EXPR
&& !VOID_TYPE_P (TREE_TYPE (op0)))
{
tree op1 = TREE_OPERAND (exp, 1);
tree temp = TARGET_EXPR_SLOT (op0);
if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
&& TREE_OPERAND (op1, 1) == temp)
{
op1 = copy_node (op1);
TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
return check_omp_for_incr_expr (loc, op1, decl);
}
}
break;
}
default:
break;
}
return error_mark_node;
}
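/* Worked example (illustrative): an increment such as
   "i = (int) ((long) i + 5)" is not matched by the simple patterns in
   c_finish_omp_for below, so this function is called on the RHS.  The
   conversion recurses into its operand, the PLUS_EXPR recurses into
   "(long) i", which reaches "exp == decl" and folds to 0, so the whole RHS
   collapses to the step 5; the caller then rebuilds the canonical
   "i = i + step" form.  An expression that does not contain DECL at all
   falls through to error_mark_node.  */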
/* Validate and emit code for the OpenMP directive #pragma omp for.
DECLV is a vector of iteration variables, for each collapsed loop.
INITV, CONDV and INCRV are vectors containing initialization
expressions, controlling predicates and increment expressions.
BODY is the body of the loop and PRE_BODY statements that go before
the loop. */
tree
c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
tree incrv, tree body, tree pre_body)
{
location_t elocus;
bool fail = false;
int i;
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
{
tree decl = TREE_VEC_ELT (declv, i);
tree init = TREE_VEC_ELT (initv, i);
tree cond = TREE_VEC_ELT (condv, i);
tree incr = TREE_VEC_ELT (incrv, i);
elocus = locus;
if (EXPR_HAS_LOCATION (init))
elocus = EXPR_LOCATION (init);
/* Validate the iteration variable. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
&& TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
{
error_at (elocus, "invalid type for iteration variable %qE", decl);
fail = true;
}
/* In the case of "for (int i = 0...)", init will be a decl. It should
have a DECL_INITIAL that we can turn into an assignment. */
if (init == decl)
{
elocus = DECL_SOURCE_LOCATION (decl);
init = DECL_INITIAL (decl);
if (init == NULL)
{
error_at (elocus, "%qE is not initialized", decl);
init = integer_zero_node;
fail = true;
}
init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
/* FIXME diagnostics: This should
be the location of the INIT. */
elocus,
init,
NULL_TREE);
}
gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (init, 0) == decl);
if (cond == NULL_TREE)
{
error_at (elocus, "missing controlling predicate");
fail = true;
}
else
{
bool cond_ok = false;
if (EXPR_HAS_LOCATION (cond))
elocus = EXPR_LOCATION (cond);
if (TREE_CODE (cond) == LT_EXPR
|| TREE_CODE (cond) == LE_EXPR
|| TREE_CODE (cond) == GT_EXPR
|| TREE_CODE (cond) == GE_EXPR
|| TREE_CODE (cond) == NE_EXPR
|| TREE_CODE (cond) == EQ_EXPR)
{
tree op0 = TREE_OPERAND (cond, 0);
tree op1 = TREE_OPERAND (cond, 1);
/* 2.5.1. The comparison in the condition is computed in
the type of DECL, otherwise the behavior is undefined.
For example:
long n; int i;
i < n;
according to ISO will be evaluated as:
(long)i < n;
We want to force:
i < (int)n; */
if (TREE_CODE (op0) == NOP_EXPR
&& decl == TREE_OPERAND (op0, 0))
{
TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
TREE_OPERAND (cond, 1)
= fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 1));
}
else if (TREE_CODE (op1) == NOP_EXPR
&& decl == TREE_OPERAND (op1, 0))
{
TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
TREE_OPERAND (cond, 0)
= fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 0));
}
if (decl == TREE_OPERAND (cond, 0))
cond_ok = true;
else if (decl == TREE_OPERAND (cond, 1))
{
TREE_SET_CODE (cond,
swap_tree_comparison (TREE_CODE (cond)));
TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
TREE_OPERAND (cond, 0) = decl;
cond_ok = true;
}
if (TREE_CODE (cond) == NE_EXPR
|| TREE_CODE (cond) == EQ_EXPR)
{
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
cond_ok = false;
else if (operand_equal_p (TREE_OPERAND (cond, 1),
TYPE_MIN_VALUE (TREE_TYPE (decl)),
0))
TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
? GT_EXPR : LE_EXPR);
else if (operand_equal_p (TREE_OPERAND (cond, 1),
TYPE_MAX_VALUE (TREE_TYPE (decl)),
0))
TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
? LT_EXPR : GE_EXPR);
else
cond_ok = false;
}
}
if (!cond_ok)
{
error_at (elocus, "invalid controlling predicate");
fail = true;
}
}
if (incr == NULL_TREE)
{
error_at (elocus, "missing increment expression");
fail = true;
}
else
{
bool incr_ok = false;
if (EXPR_HAS_LOCATION (incr))
elocus = EXPR_LOCATION (incr);
/* Check all the valid increment expressions: v++, v--, ++v, --v,
v = v + incr, v = incr + v and v = v - incr. */
switch (TREE_CODE (incr))
{
case POSTINCREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREDECREMENT_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
incr_ok = true;
if (POINTER_TYPE_P (TREE_TYPE (decl))
&& TREE_OPERAND (incr, 1))
{
tree t = fold_convert_loc (elocus,
sizetype, TREE_OPERAND (incr, 1));
if (TREE_CODE (incr) == POSTDECREMENT_EXPR
|| TREE_CODE (incr) == PREDECREMENT_EXPR)
t = fold_build1_loc (elocus, NEGATE_EXPR, sizetype, t);
t = fold_build_pointer_plus (decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
break;
case MODIFY_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
if (TREE_OPERAND (incr, 1) == decl)
break;
if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
&& (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
|| TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
incr_ok = true;
else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
|| (TREE_CODE (TREE_OPERAND (incr, 1))
== POINTER_PLUS_EXPR))
&& TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
incr_ok = true;
else
{
tree t = check_omp_for_incr_expr (elocus,
TREE_OPERAND (incr, 1),
decl);
if (t != error_mark_node)
{
incr_ok = true;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
}
break;
default:
break;
}
if (!incr_ok)
{
error_at (elocus, "invalid increment expression");
fail = true;
}
}
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (incrv, i) = incr;
}
if (fail)
return NULL;
else
{
tree t = make_node (OMP_FOR);
TREE_TYPE (t) = void_type_node;
OMP_FOR_INIT (t) = initv;
OMP_FOR_COND (t) = condv;
OMP_FOR_INCR (t) = incrv;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
SET_EXPR_LOCATION (t, locus);
return add_stmt (t);
}
}
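/* Example (illustrative): a conforming loop such as

       #pragma omp for
       for (i = 0; i < n; i += 2) ...

   leaves this function with init "i = 0", a condition canonicalized so the
   iteration variable is the left operand (with n converted to the type of
   i, per the 2.5.1 comment above), and incr rewritten where needed into the
   canonical form "i = i + step".  */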
/* Divide CLAUSES into two lists: those that apply to a parallel
construct, and those that apply to a work-sharing construct. Place
the results in *PAR_CLAUSES and *WS_CLAUSES respectively. In
addition, add a nowait clause to the work-sharing list. LOC is the
location of the OMP_PARALLEL*. */
void
c_split_parallel_clauses (location_t loc, tree clauses,
tree *par_clauses, tree *ws_clauses)
{
tree next;
*par_clauses = NULL;
*ws_clauses = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
for (; clauses ; clauses = next)
{
next = OMP_CLAUSE_CHAIN (clauses);
switch (OMP_CLAUSE_CODE (clauses))
{
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_DEFAULT:
OMP_CLAUSE_CHAIN (clauses) = *par_clauses;
*par_clauses = clauses;
break;
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
*ws_clauses = clauses;
break;
default:
gcc_unreachable ();
}
}
}
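/* Example (illustrative): for a combined construct

       #pragma omp parallel for private(x) schedule(static)

   private(x) is moved to the parallel list and schedule(static) to the
   work-sharing list, which also receives the implicit nowait built above
   (the enclosing parallel region already synchronizes at its end).  */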
/* True if OpenMP sharing attribute of DECL is predetermined. */
enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
/* Variables with const-qualified type having no mutable member
are predetermined shared. */
if (TREE_READONLY (decl))
return OMP_CLAUSE_DEFAULT_SHARED;
return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
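/* Example (illustrative): for "const int c = 5;" TREE_READONLY (c) is set,
   so c is predetermined shared and needs no explicit data-sharing clause,
   even under default(none).  */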
|
pointer2ArrayRegular.c | // regular array types can use &array
//
static void makea ()
{
static int colidx[100];
int i;
/* iterate over the valid indices 0..99; writing colidx[100] would be out of bounds */
#pragma omp parallel for private(i)
for (i = 0; i < 100; i++)
colidx[i] = 0;
}
int main()
{
makea();
}
|
scheduled-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
/* stubs (assumed fallbacks, not part of the OpenMP runtime) so the program
   still builds when compiled without OpenMP */
#define omp_get_dynamic() 0
#define omp_get_max_threads() 1
#define omp_get_thread_limit() 1
typedef int omp_sched_t;
#define omp_get_schedule(kind, chunk) (*(kind) = (omp_sched_t)0, *(chunk) = 0)
#endif
int main(int argc, char **argv) {
int i, n = 16,chunk, a[n],suma=0;
int modifier;
omp_sched_t kind;
if(argc < 2) {
fprintf(stderr,"\nMissing chunk argument\n");
exit(-1);
}
chunk = atoi(argv[1]);
for (i=0; i<n; i++) a[i] = i;
#pragma omp parallel
{
#pragma omp for firstprivate(suma) \
lastprivate(suma) schedule(dynamic,chunk)
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf(" thread %d suma a[%d] suma=%d \n",
omp_get_thread_num(),i,suma);
}
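/* Note: suma is firstprivate (each thread starts from the value set before
   the region, here 0) and lastprivate (after the loop, suma holds the copy
   of the thread that executed iteration i == n-1), so the value printed at
   the end is that thread's partial sum, not the total over all threads. */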
#pragma omp single
{
printf("dyn-var: %d \n",omp_get_dynamic());
printf("nthreads_var: %d \n",omp_get_max_threads());
printf("thread_limit_var: %d \n",omp_get_thread_limit());
omp_get_schedule(&kind, &modifier);
printf("get_schedule: kind %d, modifier %d \n",(int)kind,modifier);
}
}
printf("Outside 'parallel for' suma=%d\n",suma);
printf("Outside the parallel region: \n");
printf("dyn-var: %d \n",omp_get_dynamic());
printf("nthreads_var: %d \n",omp_get_max_threads());
printf("thread_limit_var: %d \n",omp_get_thread_limit());
omp_get_schedule(&kind, &modifier);
printf("get_schedule: kind %d, modifier %d \n",(int)kind,modifier);
}
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos {
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver
>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
TwoStepVPStrategy(ModelPart& rModelPart,
SolverSettingsType& rSolverConfig):
BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
TwoStepVPStrategy(ModelPart& rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1,// Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2):
BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy; auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
//typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent;
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme< TSparseSpace, TDenseSpace > SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel( BaseType::GetEchoLevel() );
vel_build->SetCalculateReactionsFlag(false);
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel( BaseType::GetEchoLevel() );
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor.
virtual ~TwoStepVPStrategy(){}
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0) return ierr;
if(DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error,"DELTA_TIME Key is 0. Check that the application was correctly registered.","");
if(BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error,"BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.","");
ModelPart& rModelPart = BaseType::GetModelPart();
if ( mTimeOrder == 2 && rModelPart.GetBufferSize() < 3 )
KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (BDF2), needed 3, got ",rModelPart.GetBufferSize());
if ( mTimeOrder == 1 && rModelPart.GetBufferSize() < 2 )
KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ",rModelPart.GetBufferSize());
const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
for ( ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl )
{
ierr = itEl->Check(rCurrentProcessInfo);
if (ierr != 0) break;
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/* ierr = itCond->Check(rCurrentProcessInfo); */
/* if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
double Solve() override
{
// Initialize BDF2 coefficients
ModelPart& rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
double NormDp = 0.0;
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged= rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int maxNonLinearIterations=mMaxPressureIter;
KRATOS_INFO("TwoStepVPStrategy") << "\n Solve with two_step_vp strategy at t="<< currentTime<<"s"<<std::endl;
if(timeIntervalChanged==true && currentTime>10*timeInterval ){
maxNonLinearIterations*=2;
}
if(currentTime<10*timeInterval){
if ( BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, the given number of iterations is multiplied by 3"<< std::endl;
maxNonLinearIterations*=3;
}
if(currentTime<20*timeInterval && currentTime>=10*timeInterval){
if ( BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, the given number of iterations is multiplied by 2"<< std::endl;
maxNonLinearIterations*=2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep=false;
bool momentumAlreadyConverged=false;
bool continuityAlreadyConverged=false;
/* boost::timer solve_step_time; */
// Iterative solution for pressure
/* unsigned int timeStep = rCurrentProcessInfo[STEP]; */
/* if(timeStep==1){ */
/* unsigned int iter=0; */
/* continuityConverged = this->SolveContinuityIteration(iter,maxNonLinearIterations); */
/* }else if(timeStep==2){ */
/* unsigned int iter=0; */
/* momentumConverged = this->SolveMomentumIteration(iter,maxNonLinearIterations,fixedTimeStep); */
/* }else{ */
for(unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if ( BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
momentumConverged = this->SolveMomentumIteration(it,maxNonLinearIterations,fixedTimeStep);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
std::ofstream myfile;
myfile.open ("momentumConvergedIteration.txt",std::ios::app);
myfile << currentTime << "\t" << it << "\n";
myfile.close();
momentumAlreadyConverged=true;
}
if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
std::ofstream myfile;
myfile.open ("continuityConvergedIteration.txt",std::ios::app);
myfile << currentTime << "\t" << it << "\n";
myfile.close();
continuityAlreadyConverged=true;
}
if( fixedTimeStep==false){
continuityConverged = this->SolveContinuityIteration(it,maxNonLinearIterations);
}
if(it==maxNonLinearIterations-1 || ((continuityConverged && momentumConverged) && it>2)){
//this->ComputeErrorL2Norm();
//this->ComputeErrorL2NormCasePoiseuille();
this->UpdateStressStrain();
std::ofstream myfile;
myfile.open ("maxConvergedIteration.txt",std::ios::app);
myfile << currentTime << "\t" << it << "\n";
myfile.close();
}
if ( (continuityConverged && momentumConverged) && it>2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE,false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE,false);
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it+1 << " iterations." << std::endl;
break;
}
if( fixedTimeStep==true){
break;
}
}
/* } */
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
if (mReformDofSet)
this->Clear();
return NormDp;
}
void FinalizeSolutionStep() override
{
/* this->UpdateStressStrain(); */
}
void InitializeSolutionStep() override
{
}
void UpdateTopology(ModelPart& rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if(timeStep==1){
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0)=0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1)=0;
}else{
double & CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double & PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double & CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure-PreviousPressure)/timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if(timeStep==1){
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0)=0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1)=0;
}else{
double & CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double & PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double & CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity-PreviousPressureVelocity)/timeInterval;
}
}
}
virtual void CalculateTemporalVariables()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3 > & CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3 > & PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3 > & CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3 > & PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if((i)->IsNot(ISOLATED) && (i)->IsNot(RIGID)){
UpdateAccelerations (CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity,BDFcoeffs);
}else if((i)->Is(RIGID)){
array_1d<double, 3> Zeros(3,0.0);
(i)->FastGetSolutionStepValue(ACCELERATION,0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION,1) = Zeros;
}else {
(i)->FastGetSolutionStepValue(PRESSURE,0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE,1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY,0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY,1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION,0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION,1) = 0.0;
if((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)){
array_1d<double, 3 >& VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION,0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY,0) += VolumeAcceleration*rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if(timeStep==1){
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0)=0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1)=0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0)=0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1)=0;
}else{
double & CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double & PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double & CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double & CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = CurrentPressureVelocity/timeInterval;
CurrentPressureVelocity = (CurrentPressure-PreviousPressure)/timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity/timeInterval;
}
}
}
void CalculateAccelerations()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3 > & CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3 > & PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3 > & CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3 > & PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
if((i)->IsNot(ISOLATED) && (i)->IsNot(RIGID)){
UpdateAccelerations (CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity,BDFcoeffs);
}else if((i)->Is(RIGID)){
array_1d<double, 3> Zeros(3,0.0);
(i)->FastGetSolutionStepValue(ACCELERATION,0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION,1) = Zeros;
}else {
(i)->FastGetSolutionStepValue(PRESSURE,0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE,1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY,0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY,1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION,0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION,1) = 0.0;
if((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)){
array_1d<double, 3 >& VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION,0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY,0) += VolumeAcceleration*rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
inline void UpdateAccelerations(array_1d<double, 3 > & CurrentAcceleration,
const array_1d<double, 3 > & CurrentVelocity,
array_1d<double, 3 > & PreviousAcceleration,
const array_1d<double, 3 > & PreviousVelocity,
Vector& BDFcoeffs)
{
/* noalias(PreviousAcceleration)=CurrentAcceleration; */
noalias(CurrentAcceleration) = -BDFcoeffs[1]*(CurrentVelocity-PreviousVelocity) - PreviousAcceleration ;
// std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t)
// std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t)
// std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t)
}
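// Sketch of the update above, assuming a constant time step Dt (so that
// -BDFcoeffs[1] == 2/Dt, as noted in the comments):
//     a^{n+1} = (2/Dt) * (v^{n+1} - v^n) - a^n
// which is the trapezoidal-rule acceleration, consistent with the
// displacement update d^{n+1} = d^n + (Dt/2)*(v^{n+1} + v^n) used in
// CalculateDisplacementsAndPorosity below.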
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3 > & CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3 > & PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3 > & CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3 > & PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5* TimeStep *(CurrentVelocity[0]+PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5* TimeStep *(CurrentVelocity[1]+PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5* TimeStep *(CurrentVelocity[2]+PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
void UpdateStressStrain()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem )
{
/* itElem-> InitializeElementStrainStressState(); */
itElem-> InitializeSolutionStep(rCurrentProcessInfo);
}
}
/* this->CalculateAccelerations(); */
/* this->CalculatePressureVelocity(); */
/* this->CalculatePressureAcceleration(); */
this->CalculateTemporalVariables();
}
void Clear() override
{
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
void SetEchoLevel(int Level) override
{
BaseType::SetEchoLevel(Level);
int StrategyLevel = Level > 0 ? Level - 1 : 0;
mpMomentumStrategy->SetEchoLevel(StrategyLevel);
mpPressureStrategy->SetEchoLevel(StrategyLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "TwoStepVPStrategy" ;
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "TwoStepVPStrategy";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo)
{
KRATOS_TRY;
if (mTimeOrder == 2)
{
//calculate the BDF coefficients
double Dt = rCurrentProcessInfo[DELTA_TIME];
double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
double Rho = OldDt / Dt;
double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3, false);
BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
}
else if (mTimeOrder == 1)
{
double Dt = rCurrentProcessInfo[DELTA_TIME];
double TimeCoeff = 1.0 / Dt;
Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(2, false);
BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt)
BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
}
KRATOS_CATCH("");
}
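// Worked example (constant time step): OldDt == Dt gives Rho = 1 and
// TimeCoeff = 1/(2*Dt), hence
//     BDFcoeffs = { 3/(2*Dt), -4/(2*Dt), 1/(2*Dt) }
// i.e. the standard BDF2 formula
//     dphi/dt ~ (3*phi^{n+1} - 4*phi^n + phi^{n-1}) / (2*Dt).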
bool SolveMomentumIteration(unsigned int it,unsigned int maxIt, bool & fixedTimeStep)
{
ModelPart& rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep=false;
// build momentum system and solve for fractional step velocity increment
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);
/* std::cout<<"---- m o m e n t u m e q u a t i o n s ----"<<std::endl; */
if(it==0){
mpMomentumStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDv = mpMomentumStrategy->Solve(); */
/* } */
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout<<"-------------- s o l v e d ! ------------------"<<std::endl;
double DvErrorNorm = 0;
ConvergedMomentum = this->CheckVelocityConvergence(NormDv,DvErrorNorm);
unsigned int iterationForCheck=3;
KRATOS_INFO("TwoStepVPStrategy") << "iteration("<<it<<") Velocity error: "<< DvErrorNorm <<" velTol: " << mVelocityTolerance<< std::endl;
// Check convergence
if(it==maxIt-1){
KRATOS_INFO("TwoStepVPStrategy") << "iteration("<<it<<") Final Velocity error: "<< DvErrorNorm <<" velTol: " << mVelocityTolerance<< std::endl;
fixedTimeStep=this->FixTimeStepMomentum(DvErrorNorm);
}else if(it>iterationForCheck){
fixedTimeStep=this->CheckMomentumConvergence(DvErrorNorm);
}
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double tolerance=0.0000000001;
if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
std::ofstream myfile;
myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
myfile << it << "\t" << DvErrorNorm << "\n";
myfile.close();
}
else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
std::ofstream myfile;
myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
myfile << it << "\t" << DvErrorNorm << "\n";
myfile.close();
}
else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
std::ofstream myfile;
myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
myfile << it << "\t" << DvErrorNorm << "\n";
myfile.close();
}
else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
std::ofstream myfile;
myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
myfile << it << "\t" << DvErrorNorm << "\n";
myfile.close();
}
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
bool SolveContinuityIteration(unsigned int it,unsigned int maxIt)
{
ModelPart& rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
double NormDp = 0;
// 2. Pressure solution
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,5);
/* std::cout<<" ---- c o n t i n u i t y e q u a t i o n ----"<<std::endl; */
if(it==0){
mpPressureStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDp = mpPressureStrategy->Solve(); */
/* } */
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
double DpErrorNorm = 0;
ConvergedContinuity = this->CheckPressureConvergence(NormDp,DpErrorNorm);
KRATOS_INFO("TwoStepVPStrategy") <<" iteration("<<it<<") Pressure error: "<<DpErrorNorm <<" presTol: "<<mPressureTolerance << std::endl;
// Check convergence
if(it==maxIt-1){
KRATOS_INFO("TwoStepVPStrategy") <<" iteration("<<it<<") Final Pressure error: "<<DpErrorNorm <<" presTol: "<<mPressureTolerance << std::endl;
ConvergedContinuity=this->FixTimeStepContinuity(DpErrorNorm);
}
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double tolerance=0.0000000001;
if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
std::ofstream myfile;
myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
myfile << it << "\t" << DpErrorNorm << "\n";
myfile.close();
}
else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
std::ofstream myfile;
myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
myfile << it << "\t" << DpErrorNorm << "\n";
myfile.close();
}
else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
std::ofstream myfile;
myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
myfile << it << "\t" << DpErrorNorm << "\n";
myfile.close();
}
else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
std::ofstream myfile;
myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
myfile << it << "\t" << DpErrorNorm << "\n";
myfile.close();
}
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
void ComputeErrorL2Norm()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
long double sumErrorL2Velocity = 0;
long double sumErrorL2VelocityX = 0;
long double sumErrorL2VelocityY = 0;
long double sumErrorL2Pressure = 0;
long double sumErrorL2TauXX = 0;
long double sumErrorL2TauYY = 0;
long double sumErrorL2TauXY = 0;
#pragma omp parallel reduction(+:sumErrorL2Velocity,sumErrorL2VelocityX,sumErrorL2VelocityY,sumErrorL2Pressure,sumErrorL2TauXX,sumErrorL2TauYY,sumErrorL2TauXY) // reduction avoids data races on the shared accumulators
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem )
{
Element::GeometryType& geometry = itElem->GetGeometry();
long double nodalArea=0;
if(dimension==2){
nodalArea=geometry.Area()/3.0;
}else if(dimension==3){
nodalArea=geometry.Volume()*0.25;
}
long double bariPosX=0;
long double bariPosY=0;
long double eleErrorL2Velocity = 0;
long double eleErrorL2VelocityX = 0;
long double eleErrorL2VelocityY = 0;
long double eleErrorL2Pressure = 0;
//ShapeFunctionDerivativesArrayType DN_DX;
Matrix NContainer;
NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
//this->CalculateGeometryData(DN_DX,NContainer,GaussWeights);
const Vector& N = row(NContainer,0);
// itElem->EvaluateInPoint(elementalPressure,PRESSURE,N);
const unsigned int NumNodes = geometry.size();
double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
for(unsigned int i = 1; i < NumNodes; i++)
{
elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
}
for (unsigned int i = 0; i <geometry.size(); i++)
{
// index = i*dimension;
const long double nodalPosX = geometry(i)->X();
const long double nodalPosY = geometry(i)->Y();
// const long double velX = geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
// const long double velY = geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
// const long double pressure = geometry(i)->FastGetSolutionStepValue(PRESSURE);
// long double expectedVelocityX = pow(posX,2) * (1.0-posX)*(1.0-posX) * ( 2.0*posY - 6.0*pow(posY,2) + 4.0*pow(posY,3) );
// long double expectedVelocityY = -pow(posY,2) * (1.0-posY)*(1.0-posY) * ( 2.0*posX - 6.0*pow(posX,2) + 4.0*pow(posX,3) );
// long double expectedPressure = -posX * (1.0-posX);
// long double nodalErrorVelocityX = velX - expectedVelocityX;
// long double nodalErrorVelocityY = velY - expectedVelocityY;
// long double nodalErrorPressure = pressure - expectedPressure;
// sumErrorL2Velocity += (pow(nodalErrorVelocityX,2) + pow(nodalErrorVelocityY,2)) * nodalArea;
// sumErrorL2VelocityX += pow(nodalErrorVelocityX,2) * nodalArea;
// sumErrorL2VelocityY += pow(nodalErrorVelocityY,2) * nodalArea;
// sumErrorL2Pressure += pow(nodalErrorPressure,2) * nodalArea;
// eleErrorL2Velocity += pow(nodalErrorVelocityX,2) + pow(nodalErrorVelocityY,2);
// eleErrorL2VelocityX += pow(nodalErrorVelocityX,2);
// eleErrorL2VelocityY += pow(nodalErrorVelocityY,2);
// eleErrorL2Pressure += pow(nodalErrorPressure,2);
bariPosX += nodalPosX/3.0;
bariPosY += nodalPosY/3.0;
}
const long double posX = bariPosX;
const long double posY = bariPosY;
long double expectedVelocityX = pow(posX,2) * (1.0-posX)*(1.0-posX) * ( 2.0*posY - 6.0*pow(posY,2) + 4.0*pow(posY,3) );
long double expectedVelocityY = -pow(posY,2) * (1.0-posY)*(1.0-posY) * ( 2.0*posX - 6.0*pow(posX,2) + 4.0*pow(posX,3) );
long double expectedPressure = -posX * (1.0-posX);
eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
eleErrorL2Pressure = elementalPressure - expectedPressure;
sumErrorL2VelocityX += pow(eleErrorL2VelocityX,2) * geometry.Area();
sumErrorL2VelocityY += pow(eleErrorL2VelocityY,2) * geometry.Area();
sumErrorL2Pressure += pow(eleErrorL2Pressure,2) * geometry.Area();
// sumErrorL2Velocity += eleErrorL2Velocity * geometry.Area();
// sumErrorL2VelocityX += eleErrorL2VelocityX * geometry.Area();
// sumErrorL2VelocityY += eleErrorL2VelocityY * geometry.Area();
// sumErrorL2Pressure += eleErrorL2Pressure * geometry.Area();
const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
long double expectedTauXX = 2.0*(-4.0 * (1.0-bariPosX) * bariPosX * (-1.0 + 2.0*bariPosX) * bariPosY * (1.0 - 3.0*bariPosY + 2.0*pow(bariPosY,2)));
long double expectedTauYY = 2.0*(4.0 * bariPosX * (1.0 - 3.0*bariPosX + 2.0*pow(bariPosX,2)) * (1.0-bariPosY) * bariPosY * (-1.0 + 2.0*bariPosY));
long double expectedTauXY = (2.0 * (1.0 - 6.0*bariPosY + 6.0*pow(bariPosY,2)) * (1.0-bariPosX)*(1.0-bariPosX) * pow(bariPosX,2) -2.0 * (1.0 - 6.0*bariPosX + 6.0*pow(bariPosX,2)) * (1.0-bariPosY)*(1-bariPosY) * pow(bariPosY,2));
long double nodalErrorTauXX = tauXX - expectedTauXX;
long double nodalErrorTauYY = tauYY - expectedTauYY;
long double nodalErrorTauXY = tauXY - expectedTauXY;
// std::cout<<"tauXX "<<tauXX<<" expectedtauXX "<<expectedTauXX<<" nodalErrorTauXX "<<nodalErrorTauXX<<std::endl;
// std::cout<<"tauyy "<<tauYY<<" expectedtauYY "<<expectedTauYY<<" nodalErrorTauYY "<<nodalErrorTauYY<<std::endl;
// std::cout<<"tauXY "<<tauXY<<" expectedtauXY "<<expectedTauXY<<" nodalErrorTauXY "<<nodalErrorTauXY<<std::endl;
sumErrorL2TauXX += pow(nodalErrorTauXX,2) * geometry.Area();
sumErrorL2TauYY += pow(nodalErrorTauYY,2) * geometry.Area();
sumErrorL2TauXY += pow(nodalErrorTauXY,2) * geometry.Area();
}
}
// long double errorL2Velocity = sumErrorL2Velocity;
// long double errorL2VelocityX = sumErrorL2VelocityX;
// long double errorL2VelocityY = sumErrorL2VelocityY;
// long double errorL2Pressure = sumErrorL2Pressure;
long double errorL2Velocity = sqrt(sumErrorL2Velocity);
long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
long double errorL2Pressure = sqrt(sumErrorL2Pressure);
long double errorL2TauXX = sqrt(sumErrorL2TauXX);
long double errorL2TauYY = sqrt(sumErrorL2TauYY);
long double errorL2TauXY = sqrt(sumErrorL2TauXY);
std::ofstream myfileVelocity;
myfileVelocity.open ("errorL2VelocityFile.txt",std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
myfileVelocity.close();
std::ofstream myfileVelocityX;
myfileVelocityX.open ("errorL2VelocityXFile.txt",std::ios::app);
myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
myfileVelocityX.close();
std::ofstream myfileVelocityY;
myfileVelocityY.open ("errorL2VelocityYFile.txt",std::ios::app);
myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
myfileVelocityY.close();
std::ofstream myfilePressure;
myfilePressure.open ("errorL2PressureFile.txt",std::ios::app);
myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
myfilePressure.close();
std::ofstream myfileTauXX;
myfileTauXX.open ("errorL2TauXXFile.txt",std::ios::app);
myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
myfileTauXX.close();
std::ofstream myfileTauYY;
myfileTauYY.open ("errorL2TauYYFile.txt",std::ios::app);
myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
myfileTauYY.close();
std::ofstream myfileTauXY;
myfileTauXY.open ("errorL2TauXYFile.txt",std::ios::app);
myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
myfileTauXY.close();
}
void ComputeErrorL2NormCasePoiseuille()
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
double sumErrorL2VelocityTheta = 0;
double sumErrorL2TauTheta = 0;
double r_in=0.2;
double R_out=0.5;
double kappa=r_in/R_out;
double omega=0.5;
double viscosity=100.0;
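// Analytic reference used below (circular Couette flow between concentric
// cylinders, inner radius r_in rotating at angular velocity omega):
//   u_theta(r)     = kappa^2*omega*R_out/(1-kappa^2) * (R_out/r - r/R_out)
//   tau_rtheta(r)  = 2*viscosity*kappa^2*omega*R_out^2 / ((1-kappa^2)*r^2)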
#pragma omp parallel reduction(+:sumErrorL2VelocityTheta,sumErrorL2TauTheta) // reduction avoids data races on the shared accumulators
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem )
{
Element::GeometryType& geometry = itElem->GetGeometry();
long double nodalArea=0;
if(dimension==2){
nodalArea=geometry.Area()/3.0;
}else if(dimension==3){
nodalArea=geometry.Volume()*0.25;
}
long double bariPosX=0;
long double bariPosY=0;
long double eleErrorL2Velocity = 0;
long double eleErrorL2VelocityX = 0;
long double eleErrorL2VelocityY = 0;
long double eleErrorL2Pressure = 0;
//ShapeFunctionDerivativesArrayType DN_DX;
Matrix NContainer;
NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
//this->CalculateGeometryData(DN_DX,NContainer,GaussWeights);
const Vector& N = row(NContainer,0);
// itElem->EvaluateInPoint(elementalPressure,PRESSURE,N);
const unsigned int NumNodes = geometry.size();
double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
for(unsigned int i = 1; i < NumNodes; i++)
{
elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
}
for (unsigned int i = 0; i <geometry.size(); i++)
{
// index = i*dimension;
const long double nodalPosX = geometry(i)->X();
const long double nodalPosY = geometry(i)->Y();
bariPosX += nodalPosX/3.0;
bariPosY += nodalPosY/3.0;
}
const long double posX = bariPosX;
const long double posY = bariPosY;
const double rPos = sqrt( pow(posX,2) + pow(posY,2) );
const double cosalfa = posX/rPos;
const double sinalfa = posY/rPos;
const double sin2alfa = 2.0*cosalfa*sinalfa;
const double cos2alfa = 1.0-2.0*pow(sinalfa,2);
double expectedVelocityTheta = pow(kappa,2) * omega * R_out / (1.0 - pow(kappa,2)) * (R_out/rPos - rPos/R_out);
double computedVelocityTheta = sqrt( pow(elementalVelocityX,2) + pow(elementalVelocityY,2) );
double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
double expectedTauTheta = (2.0 * viscosity * pow(kappa,2) * omega * pow(R_out,2))/(1.0 - pow(kappa,2)) / pow(rPos,2);
double computedTauTheta = (tauXX-tauYY)*sin2alfa/2.0 - tauXY*cos2alfa;
double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta,2) * geometry.Area();
sumErrorL2TauTheta += pow(nodalErrorTauTheta,2) * geometry.Area();
}
}
double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
std::ofstream myfileVelocity;
myfileVelocity.open ("errorL2Poiseuille.txt",std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
myfileVelocity.close();
}
bool CheckVelocityConvergence(const double NormDv, double& errorNormDv)
{
ModelPart& rModelPart = BaseType::GetModelPart();
double NormV = 0.00;
errorNormDv = 0;
#pragma omp parallel reduction(+:NormV)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const array_1d<double,3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
for (unsigned int d = 0; d < 3; ++d){
NormV += Vel[d] * Vel[d];
}
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
NormV = sqrt(NormV);
if (NormV == 0.0) NormV = 1.00;
errorNormDv = NormDv / NormV;
if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0){
std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
std::cout << "The norm of velocity is: " << NormV << std::endl;
std::cout << "Velocity error: " << errorNormDv << " mVelocityTolerance: " << mVelocityTolerance<< std::endl;
}
/* else{ */
/* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
/* } */
if (errorNormDv < mVelocityTolerance)
{
return true;
}
else{
return false;
}
}
bool CheckPressureConvergence(const double NormDp, double& errorNormDp)
{
ModelPart& rModelPart = BaseType::GetModelPart();
double NormP = 0.00;
errorNormDp = 0;
#pragma omp parallel reduction(+:NormP)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
if (NormP == 0.0) NormP = 1.00;
errorNormDp = NormDp / NormP;
if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0){
std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
std::cout << " The norm of pressure is: " << NormP << std::endl;
std::cout << " Pressure error: " <<errorNormDp << std::endl;
}
/* else{ */
/* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */
/* } */
if ( errorNormDp< mPressureTolerance)
{
return true;
}
else
return false;
}
bool FixTimeStepMomentum(const double DvErrorNorm)
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double minTolerance=0.005;
bool fixedTimeStep=false;
if(currentTime<10*timeInterval){
minTolerance=10;
}
const bool isItNan = std::isnan(DvErrorNorm);
const bool isItInf = std::isinf(DvErrorNorm);
if((DvErrorNorm>minTolerance || isItNan || isItInf) && DvErrorNorm!=0 && DvErrorNorm!=1){
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE,true);
std::cout << "NOT GOOD CONVERGENCE!!! The next time interval will be reduced. error: "<<DvErrorNorm<< std::endl;
minTolerance=0.05;
if(DvErrorNorm>minTolerance){
std::cout<< "BAD CONVERGENCE!!! KEEPING THE PREVIOUS VELOCITY AND PRESSURE FIELDS. error: "<<DvErrorNorm<< std::endl;
fixedTimeStep=true;
#pragma omp parallel
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
itNode->FastGetSolutionStepValue(VELOCITY,0)=itNode->FastGetSolutionStepValue(VELOCITY,1);
itNode->FastGetSolutionStepValue(PRESSURE,0)=itNode->FastGetSolutionStepValue(PRESSURE,1);
itNode->FastGetSolutionStepValue(ACCELERATION,0)=itNode->FastGetSolutionStepValue(ACCELERATION,1);
}
}
}
}else{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE,false);
}
return fixedTimeStep;
}
bool CheckMomentumConvergence(const double DvErrorNorm)
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
double minTolerance=0.99999;
bool fixedTimeStep=false;
const bool isItNan = std::isnan(DvErrorNorm);
const bool isItInf = std::isinf(DvErrorNorm);
if((DvErrorNorm>minTolerance || isItNan || isItInf) && DvErrorNorm!=0 && DvErrorNorm!=1){
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE,true);
std::cout<< " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: "<<DvErrorNorm<<" higher than "<<minTolerance<< std::endl;
std::cout<< " KEEPING THE PREVIOUS VELOCITY AND PRESSURE FIELDS"<< std::endl;
fixedTimeStep=true;
#pragma omp parallel
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
itNode->FastGetSolutionStepValue(VELOCITY,0)=itNode->FastGetSolutionStepValue(VELOCITY,1);
itNode->FastGetSolutionStepValue(PRESSURE,0)=itNode->FastGetSolutionStepValue(PRESSURE,1);
itNode->FastGetSolutionStepValue(ACCELERATION,0)=itNode->FastGetSolutionStepValue(ACCELERATION,1);
}
}
}else{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE,false);
}
return fixedTimeStep;
}
bool FixTimeStepContinuity(const double DvErrorNorm)
{
ModelPart& rModelPart = BaseType::GetModelPart();
ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double minTolerance=0.01;
bool fixedTimeStep=false;
if(currentTime<10*timeInterval){
minTolerance=10;
}
const bool isItNan = std::isnan(DvErrorNorm);
const bool isItInf = std::isinf(DvErrorNorm);
if((DvErrorNorm>minTolerance || isItNan || isItInf) && DvErrorNorm!=0 && DvErrorNorm!=1){
fixedTimeStep=true;
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE,true);
}else{
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE,false);
}
return fixedTimeStep;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
double mVelocityTolerance;
double mPressureTolerance;
unsigned int mMaxPressureIter;
unsigned int mDomainSize;
unsigned int mTimeOrder;
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
virtual void InitializeStrategy(SolverSettingsType& rSolverConfig)
{
KRATOS_TRY;
mTimeOrder = rSolverConfig.GetTimeOrder();
// Check that input parameters are reasonable and sufficient.
this->Check();
//ModelPart& rModelPart = this->GetModelPart();
mDomainSize = rSolverConfig.GetDomainSize();
mReformDofSet = rSolverConfig.GetReformDofSet();
BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
// Initialize strategies for each step
bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,mpMomentumStrategy);
if (HaveVelStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Velocity,mVelocityTolerance);
/* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
}
else
{
KRATOS_THROW_ERROR(std::runtime_error,"TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings","");
}
bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,mpPressureStrategy);
if (HavePressStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Pressure,mPressureTolerance);
rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,mMaxPressureIter);
}
else
{
KRATOS_THROW_ERROR(std::runtime_error,"TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings","");
}
// Check input parameters
this->Check();
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
TwoStepVPStrategy& operator=(TwoStepVPStrategy const& rOther){ return *this; }
/// Copy constructor.
TwoStepVPStrategy(TwoStepVPStrategy const& rOther){}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
array_sum.c | #include <stdio.h>
#include <omp.h>
int main()
{
int cursum, total,n;
int a[10]={1,2,3,4,5,1,2,3,4,5};
n = sizeof(a)/sizeof(int);
total = 0; /* initialize once, before the parallel region, to avoid a race */
#pragma omp parallel private(cursum) shared(total)
{
cursum = 0;
#pragma omp for
for(int i = 0; i < n; i++) /* i < n: a[n] would be out of bounds */
{
cursum += a[i];
}
#pragma omp critical
{
total += cursum;
}
}
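/* Added sketch (ours, not in the original file): the same sum using
 * OpenMP's reduction clause, which replaces the manual
 * private-partial-sum / critical pattern above. */
int total2 = 0;
#pragma omp parallel for reduction(+:total2)
for (int i = 0; i < n; i++)
{
total2 += a[i];
}
printf("Total Sum (reduction): %d\n", total2);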
printf("Total Sum: %d\n", total);
return 0;
} |
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt = 50; /* Nt falls back to 50 time steps if not given (arbitrary default) */
if (argc < 4) {
fprintf(stderr, "Usage: %s <Nx> <Ny> <Nz> [Nt]\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information; a trailing -1 sentinel terminates the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables over the whole grid, including the 4-cell halo
// that the stencil loop below reads
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=0; i<Nz; i++) {
for (j=0; j<Ny; j++) {
for (k=0; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
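// 25-point star stencil: the centre cell plus 4 neighbours in each axis
// direction; axis-symmetric, so each symmetric pair shares one coefficient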
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
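/* Allocation sketch (ours, not part of the benchmark): the pointer-of-
 * pointers layout above scatters rows across the heap; stencil codes often
 * use one contiguous block with an indexing macro instead. The names below
 * are hypothetical. */
#define GRID_IDX(i, j, k, Ny, Nx) (((size_t)(i) * (Ny) + (j)) * (Nx) + (k))
static double *alloc_grid(int Nz, int Ny, int Nx)
{
/* one malloc instead of O(Nz*Ny) small ones; index with GRID_IDX */
return (double *) malloc(sizeof(double) * (size_t)Nz * Ny * Nx);
}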
|
functions.c | /**
* File: functions.c
* Description:
* Helper file for the 2nd exercise (KNN classification using MPI),
* of the parallel and distributed systems class, THMMY AUTH.
*
* The declarations of the functions defined here are in:
* File: functions.h
* That file also contains COMMENTS about these functions.
*
* Authors:
* Katomeris Nikolaos, AEM: 8551, ngkatomer@auth.gr
* Kyriazis Leandros Giorgos, AEM: 7711, gkyriazt@auth.gr
*
* Date:
* December 2017
*/
#include "functions.h"
int Initialize(int* number_of_elements, int* number_of_attributes, int* number_of_processes, int* number_of_threads,FILE** data_fp, FILE** labels_fp, int argc, char** argv){
if (argc != 3 && argc != 4){
printf("Usage:./%s <number_of_elements> <number_of_attributes> <number_of_threads_per_process> (optional)\n", argv[0]);
return 1;
}
// MPI
MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, number_of_processes);
if (*number_of_processes % 2){
printf("Odd number of processes <%d> is not supported.\n", *number_of_processes);
return 1;
}
*number_of_elements = atoi(argv[1]);
*number_of_attributes = atoi(argv[2]);
if (argc == 4) *number_of_threads = atoi(argv[3]);
else *number_of_threads = 1;
if(*number_of_threads == 0) *number_of_threads = 1;
if (*number_of_elements % *number_of_processes){
printf("number_of_elements %% number_of_attributes must be zero\n");
return 1;
}
if((*number_of_attributes == 784 && *number_of_elements > 10000) || (*number_of_attributes == 30 && *number_of_elements > 60000) || *number_of_attributes <= 0 || *number_of_elements <= 0){
printf("These arguments are not supported. Exiting...\n");
return 1;
}
if(*number_of_attributes == 784){
*data_fp = fopen("../Files/train_X_10k_x_784.bin","rb");
*labels_fp = fopen("../Files/Labels_10k_x_1.bin","rb");
if (*data_fp == NULL){
printf("Couldn't open data file. Exiting...\n");
return 1;
}
if (*labels_fp == NULL){
printf("Couldn't open label file. Exiting...\n");
return 1;
}
}
else if(*number_of_attributes == 30){
*data_fp = fopen("../Files/train_X_SVD_60k_x_30.bin","rb");
*labels_fp = fopen("../Files/Labels_60k_x_1.bin","rb");
if (*data_fp == NULL){
printf("Couldn't open data file. Exiting...\n");
return 1;
}
if (*labels_fp == NULL){
printf("Couldn't open label file. Exiting...\n");
return 1;
}
}
else{
printf("No file with %d attributes found\n", *number_of_attributes);
return 1;
}
return 0;
}
element* prepareBuffer(int number_of_elements, int number_of_attributes){
element* my_elements = (element*)malloc(number_of_elements*sizeof(element));
void* packet = malloc(number_of_elements*sizeof(int) +number_of_elements*number_of_attributes*sizeof(double));
if (packet == NULL || my_elements == NULL){
fprintf(stderr, "Couldn't allocate memory at func: prepareBuffer\n");
return NULL;
}
int* label_packet = &((int*) packet)[0];
double* data_packet = (double*) &((int*) packet)[number_of_elements];
int i;
for (i = 0; i < number_of_elements; i++){
my_elements[i].label = &label_packet[i];
my_elements[i].attributes = &data_packet[number_of_attributes*i];
}
return my_elements;
}
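/* Exchange sketch (ours, not from the original helpers): because labels and
 * attributes share one contiguous packet, a whole element block can travel
 * in a single MPI call on raw bytes. 'next' and 'prev' are hypothetical
 * neighbour ranks in a ring. */
static void exchangePacket(void* packet, int number_of_elements,
int number_of_attributes, int next, int prev){
int bytes = (int)(number_of_elements*sizeof(int)
+ number_of_elements*number_of_attributes*sizeof(double));
MPI_Sendrecv_replace(packet, bytes, MPI_BYTE, next, 0, prev, 0,
MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}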
void swapElementP(element **one, element **two){
element *temp = *one;
*one = *two;
*two = temp;
}
int iMax(int* array, int size){
int i;
int max_index = 0;
for (i = 1; i < size; i++){
if(array[i] > array[max_index]){
max_index = i;
}
}
return max_index;
}
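/* Voting sketch (ours, not from the original helpers): classify an element
 * by the majority label among its k nearest neighbours, reusing iMax()
 * above. Assumes digit labels in 0..9, as in the MNIST-style data files. */
static int majorityLabel(const int* labelRow, int k){
int votes[10] = {0};
int j;
for (j = 0; j < k; j++){
if (labelRow[j] >= 0 && labelRow[j] < 10) votes[labelRow[j]]++;
}
return iMax(votes, 10);
}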
void findKNN(double** distKNN, int** labelKNN,int lines, int k, int attr_number,element* my_elements, element* other_elements)
{
int i;
#pragma omp parallel for
for (i = 0; i < lines; i++)
{
double distance;
int j, attr, position, counter;
for (j = 0; j < lines; j++)
{
distance = 0;
for (attr = 0; attr < attr_number; attr++){
distance += (my_elements[i].attributes[attr]-other_elements[j].attributes[attr])*(my_elements[i].attributes[attr]-other_elements[j].attributes[attr]);
}
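/* a zero distance means the candidate is the element itself (or an exact duplicate): skip it */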
if (distance == 0) continue;
for (position = k - 1; position >= 0; position--){
if (distance < distKNN[i][position]) continue;
else break;
}
position++;
if (position >= k) continue;
for (counter = k - 2; counter >= position; counter--){
distKNN[i][counter+1] = distKNN[i][counter];
labelKNN[i][counter+1] = labelKNN[i][counter];
}
distKNN[i][position] = distance;
labelKNN[i][position] = *other_elements[j].label;
}
}
}
double** initializeKNNTable(int rows, int col)
{
double** my_knn = (double**) malloc(rows*sizeof(double*));
if (my_knn == NULL) {
fprintf(stderr, "Couldn't allocate memory for the knn table");
return NULL;
}
int i, j;
for (i = 0; i < rows; i++){
my_knn[i] = (double*) malloc(col*sizeof(double));
if (my_knn[i] == NULL) {
fprintf(stderr, "Couldn't allocate memory for the knn table row %d", i);
return NULL;
}
}
for (i = 0; i < rows; i++){
for (j = 0; j < col; j++){
my_knn[i][j] = DBL_MAX;
}
}
return my_knn;
}
int** initializeLabelKNNTable(int rows, int col)
{
int** my_knn = (int**) malloc(rows*sizeof(int*));
if (my_knn == NULL) {
fprintf(stderr, "Couldn't allocate memory for the knn table");
return NULL;
}
int i, j;
for (i = 0; i < rows; i++){
my_knn[i] = (int*) malloc(col*sizeof(int));
if (my_knn[i] == NULL) {
fprintf(stderr, "Couldn't allocate memory for the knn table row %d", i);
return NULL;
}
}
for (i = 0; i < rows; i++){
for (j = 0; j < col; j++){
my_knn[i][j] = -1;
}
}
return my_knn;
}
element* loadMyElements(FILE* data_fp, FILE* label_fp, int number_of_elements, int number_of_attributes, int offset)
{
element* my_elements = (element*)malloc(number_of_elements*sizeof(element));
if (my_elements == NULL){
fprintf(stderr, "Couldn't allocate memory at func: loadMyElements\n");
return NULL;
}
void* packet = loadElements(label_fp, data_fp, number_of_elements, number_of_attributes, offset);
if (packet == NULL){
return NULL;
}
int* label_packet = &((int*) packet)[0];
double* data_packet = (double*) &((int*) packet)[number_of_elements];
int i;
for (i = 0; i < number_of_elements; i++){
my_elements[i].label = &label_packet[i];
my_elements[i].attributes = &data_packet[number_of_attributes*i];
}
return my_elements;
}
void* loadElements(FILE* label_fp, FILE* data_fp, int number_of_elements, int number_of_attributes, int offset){
void* elements = malloc(number_of_elements*sizeof(int)+number_of_attributes*number_of_elements*sizeof(double));
if (elements == NULL){
fprintf(stderr, "Couldn't allocate memory for the table segment"
" at offset %d\n", offset);
return NULL;
}
int n;
//Get Labels
int* labels_start = (int *)elements;
if (fseek(label_fp, offset*number_of_elements*sizeof(int), SEEK_SET))
{
fprintf(stderr, "Error at setting the label file's offset, offset: %d\n", offset);
return NULL;
}
if((n = fread(labels_start, sizeof(int), number_of_elements, label_fp)) != number_of_elements){
fprintf(stderr, "Error at reading the label data, offset: %d, size = %d\n", offset, number_of_elements);
return NULL;
}
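/* note: the doubles are laid out immediately after the int labels, which assumes that number_of_elements*sizeof(int) leaves the doubles suitably aligned */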
//Get attributes
if (fseek(data_fp, offset*number_of_elements*number_of_attributes*sizeof(double), SEEK_SET)){
fprintf(stderr, "Error at setting the attr file's offset, offset: %d\n", offset);
return NULL;
}
double* data_start = (double*)&((int*) elements)[number_of_elements];
if((n = fread(data_start, sizeof(double), number_of_elements*number_of_attributes, data_fp)) != number_of_elements*number_of_attributes){
fprintf(stderr, "Error at reading the attr data, offset: %d, size = %d\n", offset, number_of_elements);
return NULL;
}
return elements;
} |
GB_unop__floor_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__floor_fp32_fp32)
// op(A') function: GB (_unop_tran__floor_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = floorf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = floorf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = floorf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FLOOR || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__floor_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = floorf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = floorf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
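//------------------------------------------------------------------------------
// illustration (ours, not generated code): the bitmap pattern in isolation
//------------------------------------------------------------------------------
// A standalone sketch of what the kernel above computes: apply floorf only
// where Ab marks an entry as present (Ab == NULL means all entries exist).
static inline void floor_apply_sketch (float *Cx, const float *Ax,
const int8_t *restrict Ab, int64_t anz)
{
for (int64_t p = 0 ; p < anz ; p++)
{
if (Ab != NULL && !Ab [p]) continue ;
Cx [p] = floorf (Ax [p]) ;
}
}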
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__floor_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_doacross.c | // RUN: %libomp-compile-and-run
// XFAIL: gcc-4, gcc-5, clang-3.7, clang-3.8, icc-15, icc-16
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"
#ifndef N
#define N 750
#endif
int test_doacross() {
int i, j;
// Allocate and zero out the matrix
int *m = (int *)malloc(sizeof(int) * N * N);
for (i = 0; i < N; ++i) {
for (j = 0; j < N; ++j) {
m[i * N + j] = 0;
}
}
// Have first row and column be 0, 1, 2, 3, etc.
for (i = 0; i < N; ++i)
m[i * N] = i;
for (j = 0; j < N; ++j)
m[j] = j;
// Perform wavefront which results in matrix:
// 0 1 2 3 4
// 1 2 3 4 5
// 2 3 4 5 6
// 3 4 5 6 7
// 4 5 6 7 8
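// (i.e. m[row][col] == row + col, so the bottom-right element must equal 2*(N-1))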
#pragma omp parallel shared(m)
{
int row, col;
#pragma omp for ordered(2)
for (row = 1; row < N; ++row) {
for (col = 1; col < N; ++col) {
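// wait for the (row-1,col) and (row,col-1) iterations to publish their results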
#pragma omp ordered depend(sink : row - 1, col) depend(sink : row, col - 1)
m[row * N + col] = m[(row - 1) * N + col] + m[row * N + (col - 1)] -
m[(row - 1) * N + (col - 1)];
#pragma omp ordered depend(source)
}
}
}
// Check the bottom right element to see if iteration dependencies were held
int retval = (m[(N - 1) * N + N - 1] == 2 * (N - 1));
free(m);
return retval;
}
int main(int argc, char **argv) {
int i;
int num_failed = 0;
if (omp_get_max_threads() < 2)
omp_set_num_threads(4);
for (i = 0; i < REPETITIONS; i++) {
if (!test_doacross()) {
num_failed++;
}
}
return num_failed;
}
|
spectra.c | /** @file spectra.c Documented spectra module
*
* Julien Lesgourgues, 25.08.2010
*
* This module computes the anisotropy and Fourier power spectra
* \f$ C_l^{X}, P(k), ... \f$'s given the transfer and Bessel functions
* (for anisotropy spectra), the source functions (for Fourier spectra)
* and the primordial spectra.
*
* The following functions can be called from other modules:
*
* -# spectra_init() at the beginning (but after transfer_init())
* -# spectra_cl_at_l() at any time for computing C_l at any l
* -# spectra_pk_at_z() at any time for computing P(k) at any z
* -# spectra_pk_at_k_and_z() at any time for computing P(k) at any k and z
* -# spectra_free() at the end
*/
#include "spectra.h"
int spectra_bandpower(struct spectra * psp,
int l1,
int l2,
double * TT_II,
double * TT_RI,
double * TT_RR
) {
int l;
int index_md;
double * cl_tot;
double ** cl_md;
double ** cl_md_ic;
class_alloc(cl_tot,psp->ct_size*sizeof(double),psp->error_message);
class_alloc(cl_md,psp->md_size*sizeof(double*),psp->error_message);
class_alloc(cl_md_ic,psp->md_size*sizeof(double*),psp->error_message);
for (index_md=0;index_md<psp->md_size; index_md++) {
class_alloc(cl_md[index_md],psp->ct_size*sizeof(double),psp->error_message);
class_alloc(cl_md_ic[index_md],psp->ct_size*psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);
}
*TT_RR=0.;
*TT_RI=0.;
*TT_II=0.;
for (l=l1; l<=l2; l++) {
class_call(spectra_cl_at_l(psp,
(double)l,
cl_tot,
cl_md,
cl_md_ic),
psp->error_message,
psp->error_message);
*TT_RR += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,0,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt];
*TT_RI += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt]*2.;
*TT_II += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(1,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt];
}
for (index_md=0;index_md<psp->md_size; index_md++) {
free(cl_md[index_md]);
free(cl_md_ic[index_md]);
}
free(cl_tot);
free(cl_md);
free(cl_md_ic);
return _SUCCESS_;
}
/**
* Anisotropy power spectra C_l's for all types, modes and initial conditions.
*
* This routine evaluates all the C_l's at a given value of l by
* interpolating in the pre-computed table. When relevant, it also
* sums over all initial conditions for each mode, and over all modes.
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param l Input: multipole number
* @param cl_tot Output: total C_l's for all types (TT, TE, EE, etc.)
* @param cl_md Output: C_l's for all types (TT, TE, EE, etc.) decomposed mode by mode (scalar, tensor, ...) when relevant
* @param cl_md_ic Output: C_l's for all types (TT, TE, EE, etc.) decomposed by pairs of initial conditions (adiabatic, isocurvature) for each mode (usually, only for the scalar mode) when relevant
* @return the error status
*/
int spectra_cl_at_l(
struct spectra * psp,
double l,
double * cl_tot, /* array with argument cl_tot[index_ct] (must be already allocated) */
double * * cl_md, /* array with argument cl_md[index_md][index_ct] (must be already allocated only if several modes) */
double * * cl_md_ic /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] (must be already allocated for a given mode only if several ic's) */
) {
/** Summary: */
/** - define local variables */
int last_index;
int index_md;
int index_ic1,index_ic2,index_ic1_ic2;
int index_ct;
/** A) treat case in which there is only one mode and one initial condition.
Then, only cl_tot needs to be filled. */
if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) {
index_md = 0;
if ((int)l <= psp->l[psp->l_size[index_md]-1]) {
/* interpolate at l */
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ct_size,
l,
&last_index,
cl_tot,
psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
/* set to zero the types whose l_max_ct is smaller than l */
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
if ((int)l > psp->l_max_ct[index_md][index_ct])
cl_tot[index_ct]=0.;
}
else {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]=0.;
}
}
/** B) treat case in which there is only one mode
with several initial conditions.
Fill cl_md_ic[index_md=0] and sum it to get cl_tot. */
if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) {
index_md = 0;
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]=0.;
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (((int)l <= psp->l[psp->l_size[index_md]-1]) &&
(psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) {
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
l,
&last_index,
cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
if ((int)l > psp->l_max_ct[index_md][index_ct])
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
else {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
/* compute cl_tot by summing over cl_md_ic */
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
if (index_ic1 == index_ic2)
cl_tot[index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
else
cl_tot[index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
}
}
}
}
/** C) loop over modes */
if (psp->md_size > 1) {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]=0.;
for (index_md = 0; index_md < psp->md_size; index_md++) {
/** C.1) treat case in which the mode under consideration
has only one initial condition.
Fill cl_md[index_md]. */
if (psp->ic_size[index_md] == 1) {
if ((int)l <= psp->l[psp->l_size[index_md]-1]) {
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ct_size,
l,
&last_index,
cl_md[index_md],
psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
if ((int)l > psp->l_max_ct[index_md][index_ct])
cl_md[index_md][index_ct]=0.;
}
else {
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_md[index_md][index_ct]=0.;
}
}
/** C.2) treat case in which the mode under consideration
has several initial conditions.
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */
if (psp->ic_size[index_md] > 1) {
if ((int)l <= psp->l[psp->l_size[index_md]-1]) {
/* interpolate all ic and ct */
class_call(array_interpolate_spline(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ddcl[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
l,
&last_index,
cl_md_ic[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
psp->error_message),
psp->error_message,
psp->error_message);
/* set to zero some of the components */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_))
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
}
}
}
/* if l was too big, set anyway all components to zero */
else {
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.;
}
}
}
}
/* sum up all ic for each mode */
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
cl_md[index_md][index_ct]=0.;
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (index_ic1 == index_ic2)
cl_md[index_md][index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
else
cl_md[index_md][index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct];
}
}
}
}
/** C.3) add contribution of cl_md[index_md] to cl_tot */
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
cl_tot[index_ct]+=cl_md[index_md][index_ct];
}
}
return _SUCCESS_;
}
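/**
 * Usage sketch (ours, not part of CLASS): total TT spectrum at one multipole
 * in the simplest configuration (one mode, one initial condition), where the
 * decomposed arrays cl_md and cl_md_ic are not used and may be passed as NULL.
 */
static int spectra_cl_tt_sketch(
struct spectra * psp,
double l,
double * tt
) {
double * cl_tot;
class_alloc(cl_tot,psp->ct_size*sizeof(double),psp->error_message);
class_call(spectra_cl_at_l(psp,l,cl_tot,NULL,NULL),
psp->error_message,
psp->error_message);
*tt = cl_tot[psp->index_ct_tt];
free(cl_tot);
return _SUCCESS_;
}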
/**
* Matter power spectrum for arbitrary redshift and for all initial conditions.
*
* This routine evaluates the matter power spectrum at a given value of z by
* interpolating in the pre-computed table (if several values of z have been stored)
* or by directly reading it (if it only contains values at z=0 and we want P(k,z=0))
*
*
* Can be called in two modes: linear or logarithmic.
*
* - linear: returns P(k) (units: Mpc^3)
*
* - logarithmic: returns ln(P(k))
*
* One little subtlety: in case of several correlated initial conditions,
* the cross-correlation spectrum can be negative. Then, in logarithmic mode,
* the non-diagonal elements contain the cross-correlation angle P_12/sqrt(P_11 P_22)
* (from -1 to 1) instead of ln(P_12)
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param mode Input: linear or logarithmic
* @param z Input: redshift
* @param output_tot Output: total matter power spectrum P(k) in Mpc**3 (linear mode), or its logarithm (logarithmic mode)
* @param output_ic Output: for each pair of initial conditions, matter power spectra P(k) in Mpc**3 (linear mode), or their logarithms and cross-correlation angles (logarithmic mode)
* @return the error status
*/
int spectra_pk_at_z(
struct background * pba,
struct spectra * psp,
enum linear_or_logarithmic mode,
double z,
double * output_tot, /* array with argument output_tot[index_k] (must be already allocated) */
double * output_ic /* array with argument output_tot[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] (must be already allocated only if more than one initial condition) */
) {
/** Summary: */
/** - define local variables */
int index_md;
int last_index;
int index_k;
double tau,ln_tau;
int index_ic1,index_ic2,index_ic1_ic2;
index_md = psp->index_md_scalars;
/** - first step: convert z into ln(tau) */
class_call(background_tau_of_z(pba,z,&tau),
pba->error_message,
psp->error_message);
class_test(tau <= 0.,
psp->error_message,
"negative or null value of conformal time: cannot interpolate");
ln_tau = log(tau);
/** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */
/** (a.) if only values at tau=tau_today are stored and we want P(k,z=0), no need to interpolate */
if (psp->ln_tau_size == 1) {
class_test(z != 0.,
psp->error_message,
"asked z=%e but only P(k,z=0) has been tabulated",z);
for (index_k=0; index_k<psp->ln_k_size; index_k++)
if (psp->ic_size[index_md] == 1) {
output_tot[index_k] = psp->ln_pk[index_k];
}
else {
for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] =
psp->ln_pk[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2];
}
}
}
/** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */
else {
if (psp->ic_ic_size[index_md] == 1) {
class_call(array_interpolate_spline(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk,
psp->ddln_pk,
psp->ln_k_size,
ln_tau,
&last_index,
output_tot,
psp->ln_k_size,
psp->error_message),
psp->error_message,
psp->error_message);
}
else {
class_call(array_interpolate_spline(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk,
psp->ddln_pk,
psp->ic_ic_size[index_md]*psp->ln_k_size,
ln_tau,
&last_index,
output_ic,
psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message),
psp->error_message,
psp->error_message);
}
}
/** - third step: if there are several initial conditions, compute the total P(k) and set back all uncorrelated coefficients to exactly zero. Check positivity of total P(k). */
if (psp->ic_size[index_md] > 1) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
output_tot[index_k] = 0.;
for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (index_ic1 == index_ic2) {
output_tot[index_k] += exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]);
}
else {
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
output_tot[index_k] +=
2. * output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] *
sqrt(exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]) *
exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]));
}
else
output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.;
}
}
}
class_test(output_tot[index_k] <= 0.,
psp->error_message,
"for k=%e, z=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total=%e results negative",
exp(psp->ln_k[index_k]),z,output_tot[index_k]);
}
}
/** - fourth step: depending on requested mode (linear or logarithmic), apply necessary transformation to the output arrays */
/** (a.) linear mode: if only one initial condition, convert output_pk to linear format; if several initial conditions, convert output_ic to linear format, output_tot is already in this format */
if (mode == linear) {
if (psp->ic_size[index_md] == 1) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
output_tot[index_k] = exp(output_tot[index_k]);
}
}
else {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);
output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]);
}
for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] =
output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])]
*sqrt(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])] *
output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]);
}
}
}
}
}
/** (b.) logarithmic mode: if only one initial condition, nothing to be done; if several initial conditions, convert output_tot to logarithmic format, output_ic is already in this format */
else {
if (psp->ic_size[index_md] > 1) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
/* we have already checked above that output_tot was positive */
output_tot[index_k] = log(output_tot[index_k]);
}
}
}
return _SUCCESS_;
}
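/**
 * Usage sketch (ours, not part of CLASS): linear P(k) at z=0 with a single
 * initial condition, in which case output_ic is unused and may be NULL.
 * output must hold psp->ln_k_size doubles.
 */
static int spectra_pk_linear_z0_sketch(
struct background * pba,
struct spectra * psp,
double * output
) {
return spectra_pk_at_z(pba,psp,linear,0.,output,NULL);
}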
/**
* Matter power spectrum for arbitrary wavenumber, redshift and initial condition.
*
* This routine evaluates the matter power spectrum at a given value of k and z by
* interpolating in a table of all P(k)'s computed at this z by spectra_pk_at_z() (when kmin <= k <= kmax),
* or eventually by using directly the primordial spectrum (when 0 <= k < kmin):
* the latter case is an approximation, valid when kmin << comoving Hubble scale today.
* Returns zero when k=0. Returns an error when k<0 or k > kmax.
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param k Input: wavenumber in 1/Mpc
* @param z Input: redshift
* @param pk_tot Output: total matter power spectrum P(k) in Mpc**3
* @param pk_ic Output: for each pair of initial conditions, matter power spectra P(k) in Mpc**3
* @return the error status
*/
int spectra_pk_at_k_and_z(
struct background * pba,
struct primordial * ppm,
struct spectra * psp,
double k,
double z,
double * pk_tot, /* pointer to a single number (must be already allocated) */
double * pk_ic /* array of argument pk_ic[index_ic1_ic2] (must be already allocated only if several initial conditions) */
) {
/** Summary: */
/** - define local variables */
int index_md;
int index_k;
int last_index;
int index_ic1,index_ic2,index_ic1_ic2;
double * spectrum_at_z = NULL;
double * spectrum_at_z_ic = NULL;
double * spline;
double * pk_primordial_k = NULL;
double kmin;
double * pk_primordial_kmin = NULL;
index_md = psp->index_md_scalars;
/** - first step: check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_pk_at_z()) */
class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
psp->error_message,
"k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1]));
/** - deal with case 0 <= k < kmin */
if (k < exp(psp->ln_k[0])) {
/** (a.) subcase k=0: then P(k)=0 */
if (k == 0.) {
if (psp->ic_size[index_md] == 1) {
*pk_tot=0.;
}
else {
for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
pk_ic[index_ic1_ic2] = 0.;
}
}
}
/** (b.) subcase 0<k<kmin: in this case we know that on super-Hubble scales:
* P(k) = [some number] * k * P_primordial(k)
* so
* P(k) = P(kmin) * (k P_primordial(k)) / (kmin P_primordial(kmin))
* (note that the result is accurate only if kmin is such that [a0 kmin] << H0)
*/
else {
/* compute P(k,z) which contains P(kmin,z)*/
class_alloc(spectrum_at_z,
psp->ln_k_size*sizeof(double),
psp->error_message);
if (psp->ic_size[index_md] > 1) {
class_alloc(spectrum_at_z_ic,
sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message);
}
class_call(spectra_pk_at_z(pba,
psp,
linear,
z,
spectrum_at_z,
spectrum_at_z_ic),
psp->error_message,
psp->error_message);
/* compute P_primordial(k) */
class_alloc(pk_primordial_k,
sizeof(double)*psp->ic_ic_size[index_md],
psp->error_message);
class_call(primordial_spectrum_at_k(ppm,
index_md,
linear,
k,
pk_primordial_k),
ppm->error_message,psp->error_message);
/* compute P_primordial(kmin) */
kmin = exp(psp->ln_k[0]);
class_alloc(pk_primordial_kmin,
sizeof(double)*psp->ic_ic_size[index_md],
psp->error_message);
class_call(primordial_spectrum_at_k(ppm,
index_md,
linear,
kmin,
pk_primordial_kmin),
ppm->error_message,
psp->error_message);
/* apply above analytic approximation for P(k) */
index_k=0;
if (psp->ic_size[index_md] == 1) {
index_ic1_ic2 = 0;
*pk_tot = spectrum_at_z[index_k]
*k*pk_primordial_k[index_ic1_ic2]
/kmin/pk_primordial_kmin[index_ic1_ic2];
}
else {
for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) {
pk_ic[index_ic1_ic2] = spectrum_at_z_ic[index_ic1_ic2]
*k*pk_primordial_k[index_ic1_ic2]
/kmin/pk_primordial_kmin[index_ic1_ic2];
}
}
free(spectrum_at_z);
if (psp->ic_size[index_md] > 1)
free(spectrum_at_z_ic);
free(pk_primordial_k);
free(pk_primordial_kmin);
}
}
/** - deal with case kmin <= k <= kmax */
else {
/* compute P(k,z) (in logarithmic format for more accurate interpolation) */
class_alloc(spectrum_at_z,
psp->ln_k_size*sizeof(double),
psp->error_message);
if (psp->ic_size[index_md] > 1) {
class_alloc(spectrum_at_z_ic,
sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message);
}
class_call(spectra_pk_at_z(pba,
psp,
logarithmic,
z,
spectrum_at_z,
spectrum_at_z_ic),
psp->error_message,
psp->error_message);
/* get its second derivatives with spline, then interpolate, then convert to linear format */
class_alloc(spline,
sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->error_message);
if (psp->ic_size[index_md] == 1) {
class_call(array_spline_table_lines(psp->ln_k,
psp->ln_k_size,
spectrum_at_z,
1,
spline,
_SPLINE_NATURAL_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_interpolate_spline(psp->ln_k,
psp->ln_k_size,
spectrum_at_z,
spline,
1,
log(k),
&last_index,
pk_tot,
1,
psp->error_message),
psp->error_message,
psp->error_message);
*pk_tot = exp(*pk_tot);
}
else {
class_call(array_spline_table_lines(psp->ln_k,
psp->ln_k_size,
spectrum_at_z_ic,
psp->ic_ic_size[index_md],
spline,
_SPLINE_NATURAL_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_interpolate_spline(psp->ln_k,
psp->ln_k_size,
spectrum_at_z_ic,
spline,
psp->ic_ic_size[index_md],
log(k),
&last_index,
pk_ic,
psp->ic_ic_size[index_md],
psp->error_message),
psp->error_message,
psp->error_message);
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);
pk_ic[index_ic1_ic2] = exp(pk_ic[index_ic1_ic2]);
}
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
pk_ic[index_ic1_ic2] = pk_ic[index_ic1_ic2]*
sqrt(pk_ic[index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]*
pk_ic[index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]);
}
else {
pk_ic[index_ic1_ic2] = 0.;
}
}
}
free(spectrum_at_z_ic);
}
free(spectrum_at_z);
free(spline);
}
/** - last step: if more than one condition, sum over pk_ic to get pk_tot, and set back coefficients of non-correlated pairs to exactly zero. */
if (psp->ic_size[index_md] > 1) {
*pk_tot = 0.;
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
if (index_ic1 == index_ic2)
*pk_tot += pk_ic[index_ic1_ic2];
else
*pk_tot += 2.*pk_ic[index_ic1_ic2];
}
else {
pk_ic[index_ic1_ic2] = 0.;
}
}
}
class_test(*pk_tot <= 0.,
psp->error_message,
"for k=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total results negative",k);
}
return _SUCCESS_;
}
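/* Usage sketch (ours): a single P(k,z) evaluation with one initial
 * condition, where pk_ic is unused and may be NULL, e.g.
 *   double pk;
 *   spectra_pk_at_k_and_z(pba,ppm,psp,0.1,0.,&pk,NULL);
 */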
/**
* Non-linear total matter power spectrum for arbitrary redshift.
*
* This routine evaluates the non-linear matter power spectrum at a given value of z by
* interpolating in the pre-computed table (if several values of z have been stored)
* or by directly reading it (if it only contains values at z=0 and we want P(k,z=0))
*
*
* Can be called in two modes: linear or logarithmic.
*
* - linear: returns P(k) (units: Mpc^3)
*
* - logarithmic: returns ln(P(k))
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param mode Input: linear or logarithmic
* @param z Input: redshift
* @param output_tot Output: total matter power spectrum P(k) in Mpc**3 (linear mode), or its logarithm (logarithmic mode)
* @return the error status
*/
int spectra_pk_nl_at_z(
struct background * pba,
struct spectra * psp,
enum linear_or_logarithmic mode,
double z,
double * output_tot /* array with argument output_tot[index_k] (must be already allocated) */
) {
/** Summary: */
/** - define local variables */
int last_index;
int index_k;
double tau,ln_tau;
/** - first step: convert z into ln(tau) */
class_call(background_tau_of_z(pba,z,&tau),
pba->error_message,
psp->error_message);
class_test(tau <= 0.,
psp->error_message,
"negative or null value of conformal time: cannot interpolate");
ln_tau = log(tau);
/** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */
/** (a.) if only values at tau=tau_today are stored and we want P(k,z=0), no need to interpolate */
if (psp->ln_tau_size == 1) {
class_test(z != 0.,
psp->error_message,
"asked z=%e but only P(k,z=0) has been tabulated",z);
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
output_tot[index_k] = psp->ln_pk_nl[index_k];
}
}
/** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */
else {
class_call(array_interpolate_spline(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk_nl,
psp->ddln_pk_nl,
psp->ln_k_size,
ln_tau,
&last_index,
output_tot,
psp->ln_k_size,
psp->error_message),
psp->error_message,
psp->error_message);
}
/** - third step: convert to linear format if requested */
if (mode == linear) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
output_tot[index_k] = exp(output_tot[index_k]);
}
}
return _SUCCESS_;
}
/**
* Non-linear total matter power spectrum for arbitrary wavenumber and redshift.
*
* This routine evaluates the matter power spectrum at a given value of k and z by
* interpolating in a table of all P(k)'s computed at this z by spectra_pk_nl_at_z()
* (valid when kmin <= k <= kmax).
* Returns an error when k < kmin or k > kmax.
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param ppm Input: pointer to primordial structure (unused here; kept for symmetry with spectra_pk_at_k_and_z())
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param k Input: wavenumber in 1/Mpc
* @param z Input: redshift
* @param pk_tot Output: total matter power spectrum P(k) in Mpc**3
* @return the error status
*/
int spectra_pk_nl_at_k_and_z(
struct background * pba,
struct primordial * ppm,
struct spectra * psp,
double k,
double z,
double * pk_tot /* pointer to a single number (must be already allocated) */
) {
/** Summary: */
/** - define local variables */
int last_index;
double * spectrum_at_z = NULL;
double * spline;
/** - first step: check that k is in valid range [kmin:kmax] (the test for z will be done when calling spectra_pk_nl_at_z()) */
class_test((k < exp(psp->ln_k[0])) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
psp->error_message,
"k=%e out of bounds [%e:%e]",k,exp(psp->ln_k[0]),exp(psp->ln_k[psp->ln_k_size-1]));
/* compute P(k,z) (in logarithmic format for more accurate interpolation) */
class_alloc(spectrum_at_z,
psp->ln_k_size*sizeof(double),
psp->error_message);
class_call(spectra_pk_nl_at_z(pba,
psp,
logarithmic,
z,
spectrum_at_z),
psp->error_message,
psp->error_message);
/* get its second derivatives with spline, then interpolate, then convert to linear format */
class_alloc(spline,
sizeof(double)*psp->ln_k_size,
psp->error_message);
class_call(array_spline_table_lines(psp->ln_k,
psp->ln_k_size,
spectrum_at_z,
1,
spline,
_SPLINE_NATURAL_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_interpolate_spline(psp->ln_k,
psp->ln_k_size,
spectrum_at_z,
spline,
1,
log(k),
&last_index,
pk_tot,
1,
psp->error_message),
psp->error_message,
psp->error_message);
*pk_tot = exp(*pk_tot);
free(spectrum_at_z);
free(spline);
return _SUCCESS_;
}
/**
* Matter transfer functions T_i(k) for arbitrary redshift and for all
* initial conditions.
*
* This routine evaluates the matter transfer functions at a given value of z by
* interpolating in the pre-computed table (if several values of z have been stored)
* or by directly reading it (if it only contains values at z=0 and we want T_i(k,z=0))
*
*
* This function can be
* called from whatever module at whatever time, provided that
* spectra_init() has been called before, and spectra_free() has not
* been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param z Input: redshift
* @param output Output: matter transfer functions
* @return the error status
*/
int spectra_tk_at_z(
struct background * pba,
struct spectra * psp,
double z,
double * output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */
) {
/** Summary: */
/** - define local variables */
int index_md;
int last_index;
int index_k;
int index_tr;
double tau,ln_tau;
int index_ic;
index_md = psp->index_md_scalars;
/** - first step: convert z into ln(tau) */
class_call(background_tau_of_z(pba,z,&tau),
pba->error_message,
psp->error_message);
class_test(tau <= 0.,
psp->error_message,
"negative or null value of conformal time: cannot interpolate");
ln_tau = log(tau);
/** - second step: store the matter transfer functions in the output array */
/** (a.) if only values at tau=tau_today are stored and we want T_i(k,z=0), no need to interpolate */
if (psp->ln_tau_size == 1) {
class_test(z != 0.,
psp->error_message,
"asked z=%e but only T_i(k,z=0) has been tabulated",z);
for (index_k=0; index_k<psp->ln_k_size; index_k++)
for (index_tr=0; index_tr<psp->tr_size; index_tr++)
for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++)
output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr]
= psp->matter_transfer[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr];
}
/** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */
else {
class_call(array_interpolate_spline(psp->ln_tau,
psp->ln_tau_size,
psp->matter_transfer,
psp->ddmatter_transfer,
psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
ln_tau,
&last_index,
output,
psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
psp->error_message),
psp->error_message,
psp->error_message);
}
return _SUCCESS_;
}
/**
* Matter transfer functions T_i(k) for arbitrary wavenumber, redshift
* and initial condition.
*
* This routine evaluates the matter transfer functions at a given
* value of k and z by interpolating in a table of all T_i(k,z)'s
* computed at this z by spectra_tk_at_z() (when kmin <= k <= kmax).
* Returns an error when k<kmin or k > kmax.
*
* This function can be called from whatever module at whatever time,
* provided that spectra_init() has been called before, and
* spectra_free() has not been called yet.
*
* @param pba Input: pointer to background structure (used for converting z into tau)
* @param psp Input: pointer to spectra structure (containing pre-computed table)
* @param k Input: wavenumber in 1/Mpc
* @param z Input: redshift
* @param output Output: matter transfer functions
* @return the error status
*/
int spectra_tk_at_k_and_z(
struct background * pba,
struct spectra * psp,
double k,
double z,
double * output /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */
) {
/** Summary: */
/** - define local variables */
int index_md;
int last_index;
double * tks_at_z;
double * ddtks_at_z;
index_md = psp->index_md_scalars;
/** - first step: check that k is in valid range [kmin:kmax] (the test for z will be done when calling spectra_tk_at_z()) */
class_test((k < exp(psp->ln_k[0])) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
psp->error_message,
"k=%e out of bounds [%e:%e]",k,exp(psp->ln_k[0]),exp(psp->ln_k[psp->ln_k_size-1]));
/* compute T_i(k,z) */
class_alloc(tks_at_z,
psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double),
psp->error_message);
class_call(spectra_tk_at_z(pba,
psp,
z,
tks_at_z),
psp->error_message,
psp->error_message);
/* get its second derivatives w.r.t. k with spline, then interpolate */
class_alloc(ddtks_at_z,
psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double),
psp->error_message);
class_call(array_spline_table_lines(psp->ln_k,
psp->ln_k_size,
tks_at_z,
psp->tr_size*psp->ic_size[index_md],
ddtks_at_z,
_SPLINE_NATURAL_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_interpolate_spline(psp->ln_k,
psp->ln_k_size,
tks_at_z,
ddtks_at_z,
psp->tr_size*psp->ic_size[index_md],
log(k),
&last_index,
output,
psp->tr_size*psp->ic_size[index_md],
psp->error_message),
psp->error_message,
psp->error_message);
free(tks_at_z);
free(ddtks_at_z);
return _SUCCESS_;
}
/**
* This routine initializes the spectra structure (in particular,
* computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$)
*
* @param ppr Input : pointer to precision structure
* @param pba Input : pointer to background structure (will provide H, Omega_m at redshift of interest)
* @param ppt Input : pointer to perturbation structure
* @param ptr Input : pointer to transfer structure
* @param ppm Input : pointer to primordial structure
* @param psp Output: pointer to initialized spectra structure
* @return the error status
*/
int spectra_init(
struct precision * ppr,
struct background * pba,
struct perturbs * ppt,
struct primordial * ppm,
struct nonlinear *pnl,
struct transfers * ptr,
struct spectra * psp
) {
/** Summary: */
double TT_II,TT_RI,TT_RR;
int l1,l2;
/** - check that we really want to compute at least one spectrum */
if ((ppt->has_cls == _FALSE_) &&
(ppt->has_pk_matter == _FALSE_) &&
(ppt->has_density_transfers == _FALSE_) &&
(ppt->has_velocity_transfers == _FALSE_)) {
psp->md_size = 0;
if (psp->spectra_verbose > 0)
printf("No spectra requested. Spectra module skipped.\n");
return _SUCCESS_;
}
else {
if (psp->spectra_verbose > 0)
printf("Computing unlensed linear spectra\n");
}
/** - initialize indices and allocate some of the arrays in the
spectra structure */
class_call(spectra_indices(pba,ppt,ptr,ppm,psp),
psp->error_message,
psp->error_message);
/** - deal with C_l's, if any */
if (ppt->has_cls == _TRUE_) {
class_call(spectra_cls(pba,ppt,ptr,ppm,psp),
psp->error_message,
psp->error_message);
}
else {
psp->ct_size=0;
}
/** - deal with P(k,tau) and T_i(k,tau) */
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) {
class_call(spectra_k_and_tau(pba,ppt,psp),
psp->error_message,
psp->error_message);
if (ppt->has_pk_matter == _TRUE_) {
class_call(spectra_pk(pba,ppt,ppm,pnl,psp),
psp->error_message,
psp->error_message);
}
else {
psp->ln_pk=NULL;
}
if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) {
class_call(spectra_matter_transfers(pba,ppt,psp),
psp->error_message,
psp->error_message);
}
else {
psp->matter_transfer=NULL;
}
}
else {
psp->ln_k_size=0;
}
/* if there is one isocurvature mode, compute and store in the psp
structure the isocurvature contribution to some bandpowers in
different ranges of l, and the contribution to the primordial
spectrum at different wavenumbers (used in the Planck
analysis) */
if ((ppt->has_scalars == _TRUE_) && (ppt->has_cls == _TRUE_) && (ppt->ic_size[ppt->index_md_scalars] == 2)) {
l1=2;
l2=20;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_2_20=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_2_20=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_2_20=TT_RR/(TT_II+TT_RI+TT_RR);
l1=21;
l2=200;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_21_200=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_21_200=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_21_200=TT_RR/(TT_II+TT_RI+TT_RR);
l1=201;
l2=2500;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_201_2500=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_201_2500=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_201_2500=TT_RR/(TT_II+TT_RI+TT_RR);
l1=2;
l2=2500;
class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
psp->error_message,
psp->error_message);
class_test(TT_II+TT_RI+TT_RR==0.,
psp->error_message,
"should never happen");
psp->alpha_II_2_2500=TT_II/(TT_II+TT_RI+TT_RR);
psp->alpha_RI_2_2500=TT_RI/(TT_II+TT_RI+TT_RR);
psp->alpha_RR_2_2500=TT_RR/(TT_II+TT_RI+TT_RR);
if (ppt->has_cdi==_TRUE_) {
psp->alpha_kp=ppm->f_cdi*ppm->f_cdi
/(1.+ppm->f_cdi*ppm->f_cdi);
psp->alpha_k1=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot))
/(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot)));
psp->alpha_k2=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot))
/(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot)));
}
if (ppt->has_nid==_TRUE_) {
psp->alpha_kp=ppm->f_nid*ppm->f_nid
/(1.+ppm->f_nid*ppm->f_nid);
psp->alpha_k1=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot))
/(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot)));
psp->alpha_k2=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot))
/(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot)));
}
if (ppt->has_niv==_TRUE_) {
psp->alpha_kp=ppm->f_niv*ppm->f_niv
/(1.+ppm->f_niv*ppm->f_niv);
psp->alpha_k1=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot))
/(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot)));
psp->alpha_k2=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot))
/(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot)));
}
}
return _SUCCESS_;
}
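/*
 * Call-order sketch (ours, not from the CLASS sources): spectra_init() runs
 * near the end of the pipeline, after transfer_init() and nonlinear_init(),
 * and spectra_free() is the matching cleanup, e.g.
 *
 *   spectra_init(&pr,&ba,&pt,&pm,&nl,&tr,&sp);
 *   ... calls to spectra_cl_at_l(), spectra_pk_at_z(), ...
 *   spectra_free(&sp);
 */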
/**
* This routine frees all the memory space allocated by spectra_init().
*
* To be called at the end of each run, only when no further calls to
* spectra_cl_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed.
*
* @param psp Input: pointer to spectra structure (whose fields must be freed)
* @return the error status
*/
int spectra_free(
struct spectra * psp
) {
int index_md;
if (psp->md_size > 0) {
if (psp->ct_size > 0) {
for (index_md = 0; index_md < psp->md_size; index_md++) {
free(psp->l_max_ct[index_md]);
free(psp->cl[index_md]);
free(psp->ddcl[index_md]);
}
free(psp->l);
free(psp->l_size);
free(psp->l_max_ct);
free(psp->l_max);
free(psp->cl);
free(psp->ddcl);
}
if (psp->ln_k_size > 0) {
free(psp->ln_tau);
free(psp->ln_k);
if (psp->ln_pk != NULL) {
free(psp->ln_pk);
if (psp->ln_tau_size > 1) {
free(psp->ddln_pk);
}
if (psp->ln_pk_nl != NULL) {
free(psp->ln_pk_nl);
if (psp->ln_tau_size > 1) {
free(psp->ddln_pk_nl);
}
}
}
if (psp->matter_transfer != NULL) {
free(psp->matter_transfer);
if (psp->ln_tau_size > 1) {
free(psp->ddmatter_transfer);
}
}
}
}
for (index_md=0; index_md < psp->md_size; index_md++)
free(psp->is_non_zero[index_md]);
free(psp->is_non_zero);
free(psp->ic_size);
free(psp->ic_ic_size);
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the spectra structure
*
* @param pba Input : pointer to background structure
* @param ppt Input : pointer to perturbation structure
* @param ptr Input : pointer to transfers structure
* @param ppm Input : pointer to primordial structure
* @param psp Input/output: pointer to spectra structure
* @return the error status
*/
int spectra_indices(
struct background * pba,
struct perturbs * ppt,
struct transfers * ptr,
struct primordial * ppm,
struct spectra * psp
){
int index_ct;
int index_md;
int index_ic1_ic2;
int index_tr;
psp->md_size = ppt->md_size;
if (ppt->has_scalars == _TRUE_)
psp->index_md_scalars = ppt->index_md_scalars;
class_alloc(psp->ic_size,
sizeof(int)*psp->md_size,
psp->error_message);
class_alloc(psp->ic_ic_size,
sizeof(int)*psp->md_size,
psp->error_message);
class_alloc(psp->is_non_zero,
sizeof(short *)*psp->md_size,
psp->error_message);
for (index_md=0; index_md < psp->md_size; index_md++) {
psp->ic_size[index_md] = ppm->ic_size[index_md];
psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md];
class_alloc(psp->is_non_zero[index_md],
sizeof(short)*psp->ic_ic_size[index_md],
psp->error_message);
for (index_ic1_ic2=0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++)
psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2];
}
if (ppt->has_cls == _TRUE_) {
/* types of C_l's relevant for both scalars and tensors: TT, EE, TE */
index_ct=0;
if (ppt->has_cl_cmb_temperature == _TRUE_) {
psp->has_tt = _TRUE_;
psp->index_ct_tt=index_ct;
index_ct++;
}
else {
psp->has_tt = _FALSE_;
}
if (ppt->has_cl_cmb_polarization == _TRUE_) {
psp->has_ee = _TRUE_;
psp->index_ct_ee=index_ct;
index_ct++;
}
else {
psp->has_ee = _FALSE_;
}
if ((ppt->has_cl_cmb_temperature == _TRUE_) &&
(ppt->has_cl_cmb_polarization == _TRUE_)) {
psp->has_te = _TRUE_;
psp->index_ct_te=index_ct;
index_ct++;
}
else {
psp->has_te = _FALSE_;
}
if (ppt->has_cl_cmb_polarization == _TRUE_) {
psp->has_bb = _TRUE_;
psp->index_ct_bb=index_ct;
index_ct++;
}
else {
psp->has_bb = _FALSE_;
}
/* types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, d-d, T-d */
if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_pp = _TRUE_;
psp->index_ct_pp=index_ct;
index_ct++;
}
else {
psp->has_pp = _FALSE_;
}
if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_tp = _TRUE_;
psp->index_ct_tp=index_ct;
index_ct++;
}
else {
psp->has_tp = _FALSE_;
}
if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_ep = _TRUE_;
psp->index_ct_ep=index_ct;
index_ct++;
}
else {
psp->has_ep = _FALSE_;
}
if ((ppt->has_scalars == _TRUE_) &&
((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)))
psp->d_size=ppt->selection_num;
else
psp->d_size=0;
if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_dd = _TRUE_;
psp->index_ct_dd=index_ct;
index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
}
else {
psp->has_dd = _FALSE_;
}
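/* The increment used here (and for the ll and dl blocks below) counts the
   pairs (index_d1,index_d2) with
   index_d1 <= index_d2 <= MIN(index_d1+non_diag, d_size-1):
   a full triangle of d_size*(d_size+1)/2 pairs, minus the
   (d_size-non_diag)*(d_size-1-non_diag)/2 pairs lying more than non_diag
   bins away from the diagonal. For example, with d_size=3 and non_diag=1
   this gives (12-2)/2 = 5 pairs: (0,0),(0,1),(1,1),(1,2),(2,2). */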
/* the computation of C_l^Td would require a very good sampling of
transfer functions over a wide range, and a huge computation
time. In the current version, we prefer to switch it off, rather
than either slowing down the code considerably, or producing
very inaccurate spectra.
if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_td = _TRUE_;
psp->index_ct_td=index_ct;
index_ct+=psp->d_size;
}
else {
psp->has_td = _FALSE_;
}
*/
psp->has_td = _FALSE_;
if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_pd = _TRUE_;
psp->index_ct_pd=index_ct;
index_ct+=psp->d_size;
}
else {
psp->has_pd = _FALSE_;
}
if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_ll = _TRUE_;
psp->index_ct_ll=index_ct;
index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
}
else {
psp->has_ll = _FALSE_;
}
/* the computation of C_l^Tl would require a very good sampling of
transfer functions over a wide range, and a huge computation
time. In the current version, we prefer to switch it off, rather
than either slowing down the code considerably, or producing
very inaccurate spectra.
if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_tl = _TRUE_;
psp->index_ct_tl=index_ct;
index_ct+=psp->d_size;
}
else {
psp->has_tl = _FALSE_;
}
*/
psp->has_tl = _FALSE_;
if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
psp->has_dl = _TRUE_;
psp->index_ct_dl=index_ct;
index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
}
else {
psp->has_dl = _FALSE_;
}
psp->ct_size = index_ct;
/* infer from input quantities the l_max for each mode and type,
l_max_ct[index_md][index_type]. Maximize it over index_ct, and
then over index_md. */
class_alloc(psp->l_max,sizeof(int)*psp->md_size,psp->error_message);
class_alloc(psp->l_max_ct,sizeof(int*)*psp->md_size,psp->error_message);
for (index_md=0; index_md<psp->md_size; index_md++) {
class_calloc(psp->l_max_ct[index_md],psp->ct_size,sizeof(int),psp->error_message);
}
if (ppt->has_scalars == _TRUE_) {
/* spectra computed up to l_scalar_max */
if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max;
if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max;
if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max;
if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max;
if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max;
if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max;
/* spectra computed up to l_lss_max */
if (psp->has_dd == _TRUE_)
for (index_ct=psp->index_ct_dd;
index_ct<psp->index_ct_dd+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
index_ct++)
psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;
if (psp->has_td == _TRUE_)
for (index_ct=psp->index_ct_td;
index_ct<psp->index_ct_td+psp->d_size;
index_ct++)
psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);
if (psp->has_pd == _TRUE_)
for (index_ct=psp->index_ct_pd;
index_ct<psp->index_ct_pd+psp->d_size;
index_ct++)
psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);
if (psp->has_ll == _TRUE_)
for (index_ct=psp->index_ct_ll;
index_ct<psp->index_ct_ll+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
index_ct++)
psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;
if (psp->has_tl == _TRUE_)
for (index_ct=psp->index_ct_tl;
index_ct<psp->index_ct_tl+psp->d_size;
index_ct++)
psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);
if (psp->has_dl == _TRUE_)
for (index_ct=psp->index_ct_dl;
index_ct<psp->index_ct_dl+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
index_ct++)
psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;
}
if (ppt->has_tensors == _TRUE_) {
/* spectra computed up to l_tensor_max */
if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max;
if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max;
if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max;
if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max;
}
/* maximizations */
psp->l_max_tot = 0;
for (index_md=0; index_md < psp->md_size; index_md++) {
psp->l_max[index_md] = 0;
for (index_ct=0; index_ct<psp->ct_size; index_ct++)
psp->l_max[index_md] = MAX(psp->l_max[index_md],psp->l_max_ct[index_md][index_ct]);
psp->l_max_tot = MAX(psp->l_max_tot,psp->l_max[index_md]);
}
}
/* indices for species associated with a matter transfer function in Fourier space */
index_tr=0;
class_define_index(psp->index_tr_delta_g,ppt->has_source_delta_g,index_tr,1);
class_define_index(psp->index_tr_delta_b,ppt->has_source_delta_b,index_tr,1);
class_define_index(psp->index_tr_delta_cdm,ppt->has_source_delta_cdm,index_tr,1);
class_define_index(psp->index_tr_delta_dcdm,ppt->has_source_delta_dcdm,index_tr,1);
class_define_index(psp->index_tr_delta_scf,ppt->has_source_delta_scf,index_tr,1);
class_define_index(psp->index_tr_delta_fld,ppt->has_source_delta_fld,index_tr,1);
class_define_index(psp->index_tr_delta_ur,ppt->has_source_delta_ur,index_tr,1);
class_define_index(psp->index_tr_delta_dr,ppt->has_source_delta_dr,index_tr,1);
class_define_index(psp->index_tr_delta_ncdm1,ppt->has_source_delta_ncdm,index_tr,pba->N_ncdm);
class_define_index(psp->index_tr_delta_tot,ppt->has_density_transfers,index_tr,1);
/* indices for species associated with a velocity transfer function in Fourier space */
class_define_index(psp->index_tr_theta_g,ppt->has_source_theta_g,index_tr,1);
class_define_index(psp->index_tr_theta_b,ppt->has_source_theta_b,index_tr,1);
class_define_index(psp->index_tr_theta_cdm,ppt->has_source_theta_cdm,index_tr,1);
class_define_index(psp->index_tr_theta_dcdm,ppt->has_source_theta_dcdm,index_tr,1);
class_define_index(psp->index_tr_theta_scf,ppt->has_source_theta_scf,index_tr,1);
class_define_index(psp->index_tr_theta_fld,ppt->has_source_theta_fld,index_tr,1);
class_define_index(psp->index_tr_theta_ur,ppt->has_source_theta_ur,index_tr,1);
class_define_index(psp->index_tr_theta_dr,ppt->has_source_theta_dr,index_tr,1);
class_define_index(psp->index_tr_theta_ncdm1,ppt->has_source_theta_ncdm,index_tr,pba->N_ncdm);
class_define_index(psp->index_tr_theta_tot,ppt->has_velocity_transfers,index_tr,1);
psp->tr_size = index_tr;
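/* Note: class_define_index (defined in the CLASS common header) is,
   schematically, equivalent to the following, so each flagged species
   reserves `num` consecutive slots and the running counter advances:

   if (flag == _TRUE_) {
     index = running_index;
     running_index += num;
   }
*/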
return _SUCCESS_;
}
/**
* This routine computes a table of values for all harmonic spectra C_l's,
* given the transfer functions and primordial spectra.
*
* @param pba Input : pointer to background structure
* @param ppt Input : pointer to perturbation structure
* @param ptr Input : pointer to transfers structure
* @param ppm Input : pointer to primordial structure
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_cls(
struct background * pba,
struct perturbs * ppt,
struct transfers * ptr,
struct primordial * ppm,
struct spectra * psp
) {
/** Summary: */
/** - define local variables */
int index_md;
int index_ic1,index_ic2,index_ic1_ic2;
int index_l;
int index_ct;
int cl_integrand_num_columns;
double * cl_integrand; /* array with argument cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct] */
double * transfer_ic1; /* array with argument transfer_ic1[index_tt] */
double * transfer_ic2; /* idem */
double * primordial_pk; /* array with argument primordial_pk[index_ic_ic]*/
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" jus after leaving the
parallel region. */
int abort;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop;
#endif
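/* Schematically, the class_*_parallel macros used below wrap their
   argument so that a failure inside one thread does not "return" from
   the parallel region: on error they store the message, set
   abort = _TRUE_, and later iterations test the flag and skip the work.
   Simplified sketch of the pattern:

   if (abort == _FALSE_) {
     if (function == _FAILURE_) {
       ... copy error message ...
       abort = _TRUE_;
     }
   }
*/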
/** - allocate pointers to arrays where results will be stored */
class_alloc(psp->l_size,sizeof(int)*psp->md_size,psp->error_message);
class_alloc(psp->cl,sizeof(double *)*psp->md_size,psp->error_message);
class_alloc(psp->ddcl,sizeof(double *)*psp->md_size,psp->error_message);
psp->l_size_max = ptr->l_size_max;
class_alloc(psp->l,sizeof(double)*psp->l_size_max,psp->error_message);
/** - store values of l */
for (index_l=0; index_l < psp->l_size_max; index_l++) {
psp->l[index_l] = (double)ptr->l[index_l];
}
/** - loop over modes (scalar, tensors, etc). For each mode: */
for (index_md = 0; index_md < psp->md_size; index_md++) {
/** - a) store number of l values for this mode */
psp->l_size[index_md] = ptr->l_size[index_md];
/** - b) allocate arrays where results will be stored */
class_alloc(psp->cl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
class_alloc(psp->ddcl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
cl_integrand_num_columns = 1+psp->ct_size*2; /* one column for k, ct_size columns for the values of each type, ct_size columns for their second derivatives */
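/* Layout of cl_integrand (one line per wavenumber index_q):
   column 0                       : k
   columns 1 .. ct_size           : integrand value for each C_l type
   columns 1+ct_size .. 2*ct_size : second derivatives with respect to k,
                                    filled later by array_spline() */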
/** - c) loop over initial conditions */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
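/* index_symmetric_matrix packs the symmetric (ic1,ic2) matrix into a 1d
   array of size ic_size*(ic_size+1)/2; e.g. for ic_size=2 (say, AD and
   CDI) the pairs (0,0),(0,1),(1,1) map to indices 0,1,2, which is how
   the ic_ic_size entries are laid out. */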
/* non-diagonal coefficients should be computed only if non-zero correlation */
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
/* initialize error management flag */
abort = _FALSE_;
/* beginning of parallel region */
#pragma omp parallel \
shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \
private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop)
{
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
class_alloc_parallel(cl_integrand,
ptr->q_size*cl_integrand_num_columns*sizeof(double),
psp->error_message);
class_alloc_parallel(primordial_pk,
psp->ic_ic_size[index_md]*sizeof(double),
psp->error_message);
class_alloc_parallel(transfer_ic1,
ptr->tt_size[index_md]*sizeof(double),
psp->error_message);
class_alloc_parallel(transfer_ic2,
ptr->tt_size[index_md]*sizeof(double),
psp->error_message);
#pragma omp for schedule (dynamic)
/** - loop over l values defined in the transfer module.
For each l, compute the C_l's for all types (TT, TE, ...)
by convolving primordial spectra with transfer functions.
This elementary task is assigned to spectra_compute_cl() */
for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {
#pragma omp flush(abort)
class_call_parallel(spectra_compute_cl(pba,
ppt,
ptr,
ppm,
psp,
index_md,
index_ic1,
index_ic2,
index_l,
cl_integrand_num_columns,
cl_integrand,
primordial_pk,
transfer_ic1,
transfer_ic2),
psp->error_message,
psp->error_message);
} /* end of loop over l */
#ifdef _OPENMP
tstop = omp_get_wtime();
if (psp->spectra_verbose > 1)
printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n",
__func__,tstop-tstart,omp_get_thread_num());
#endif
free(cl_integrand);
free(primordial_pk);
free(transfer_ic1);
free(transfer_ic2);
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
}
else {
/* set non-diagonal coefficients to zero if pair of ic's uncorrelated */
for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
psp->cl[index_md]
[(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
= 0.;
}
}
}
}
}
/** - d) now that for a given mode, all possible C_l's have been computed,
compute second derivative of the array in which they are stored,
in view of spline interpolation. */
class_call(array_spline_table_lines(psp->l,
psp->l_size[index_md],
psp->cl[index_md],
psp->ic_ic_size[index_md]*psp->ct_size,
psp->ddcl[index_md],
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
}
return _SUCCESS_;
}
/**
* This routine computes the C_l's for a given mode, pair of initial conditions
* and multipole, but for all types (TT, TE...), by convolving the
* transfer functions with the primordial spectra.
*
* @param pba Input : pointer to background structure
* @param ppt Input : pointer to perturbation structure
* @param ptr Input : pointer to transfers structure
* @param ppm Input : pointer to primordial structure
* @param psp Input/Output: pointer to spectra structure (result stored here)
* @param index_md Input : index of mode under consideration
* @param index_ic1 Input : index of first initial condition in the correlator
* @param index_ic2 Input : index of second initial condition in the correlator
* @param index_l Input : index of multipole under consideration
* @param cl_integrand_num_columns Input : number of columns in cl_integrand
* @param cl_integrand Input : an allocated workspace
* @param primordial_pk Input : table of primordial spectrum values
* @param transfer_ic1 Input : table of transfer function values for first initial condition
* @param transfer_ic2 Input : table of transfer function values for second initial condition
* @return the error status
*/
int spectra_compute_cl(
struct background * pba,
struct perturbs * ppt,
struct transfers * ptr,
struct primordial * ppm,
struct spectra * psp,
int index_md,
int index_ic1,
int index_ic2,
int index_l,
int cl_integrand_num_columns,
double * cl_integrand,
double * primordial_pk,
double * transfer_ic1,
double * transfer_ic2
) {
int index_q;
int index_tt;
int index_ct;
int index_d1,index_d2;
double k;
double clvalue;
int index_ic1_ic2;
double transfer_ic1_temp=0.;
double transfer_ic2_temp=0.;
double * transfer_ic1_nc=NULL;
double * transfer_ic2_nc=NULL;
double factor;
int index_q_spline=0;
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (ppt->has_cl_number_count == _TRUE_) {
class_alloc(transfer_ic1_nc,psp->d_size*sizeof(double),psp->error_message);
class_alloc(transfer_ic2_nc,psp->d_size*sizeof(double),psp->error_message);
}
for (index_q=0; index_q < ptr->q_size; index_q++) {
//q = ptr->q[index_q];
k = ptr->k[index_md][index_q];
cl_integrand[index_q*cl_integrand_num_columns+0] = k;
class_call(primordial_spectrum_at_k(ppm,index_md,linear,k,primordial_pk),
ppm->error_message,
psp->error_message);
/* above routine checks that k>0: no possible division by zero below */
for (index_tt=0; index_tt < ptr->tt_size[index_md]; index_tt++) {
transfer_ic1[index_tt] =
ptr->transfer[index_md]
[((index_ic1 * ptr->tt_size[index_md] + index_tt)
* ptr->l_size[index_md] + index_l)
* ptr->q_size + index_q];
if (index_ic1 == index_ic2) {
transfer_ic2[index_tt] = transfer_ic1[index_tt];
}
else {
transfer_ic2[index_tt] = ptr->transfer[index_md]
[((index_ic2 * ptr->tt_size[index_md] + index_tt)
* ptr->l_size[index_md] + index_l)
* ptr->q_size + index_q];
}
}
/* define combinations of transfer functions */
if (ppt->has_cl_cmb_temperature == _TRUE_) {
if (_scalars_) {
transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];
}
if (_vectors_) {
transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];
}
if (_tensors_) {
transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2];
transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2];
}
}
if (ppt->has_cl_number_count == _TRUE_) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
transfer_ic1_nc[index_d1] = 0.;
transfer_ic2_nc[index_d1] = 0.;
if (ppt->has_nc_density == _TRUE_) {
transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density+index_d1];
transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density+index_d1];
}
if (ppt->has_nc_rsd == _TRUE_) {
transfer_ic1_nc[index_d1]
+= transfer_ic1[ptr->index_tt_rsd+index_d1]
+ transfer_ic1[ptr->index_tt_d0+index_d1]
+ transfer_ic1[ptr->index_tt_d1+index_d1];
transfer_ic2_nc[index_d1]
+= transfer_ic2[ptr->index_tt_rsd+index_d1]
+ transfer_ic2[ptr->index_tt_d0+index_d1]
+ transfer_ic2[ptr->index_tt_d1+index_d1];
}
if (ppt->has_nc_lens == _TRUE_) {
transfer_ic1_nc[index_d1] +=
psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic1[ptr->index_tt_nc_lens+index_d1];
transfer_ic2_nc[index_d1] +=
psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic2[ptr->index_tt_nc_lens+index_d1];
}
if (ppt->has_nc_gr == _TRUE_) {
transfer_ic1_nc[index_d1]
+= transfer_ic1[ptr->index_tt_nc_g1+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g2+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g3+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g4+index_d1]
+ transfer_ic1[ptr->index_tt_nc_g5+index_d1];
transfer_ic2_nc[index_d1]
+= transfer_ic2[ptr->index_tt_nc_g1+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g2+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g3+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g4+index_d1]
+ transfer_ic2[ptr->index_tt_nc_g5+index_d1];
}
}
}
/* integrand of Cl's */
/* note: we must integrate
C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)]
where calP(k) is the dimensionless
power spectrum equal to a constant in the scale-invariant case,
and to P(k) = A_s k^(ns-1) otherwise and q=sqrt(k2+K) (scalars)
or sqrt(k2+2K) (vectors) or sqrt(k2+3K) (tensors)
In the literature, people often rewrite the integral in terms
of q and absorb the Jacobian of the change of variables in a redefinition of the primordial
spectrum. Let us illustrate this for scalars:
dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)]
This factor 1/[q(q2-K)] is commonly absorbed in the definition of calP. Then one would have
C_l = int [4 pi q2 dq {A_s k^(ns-1)/[q(q2-K)]} Delta1_l(q) Delta2_l(q)]
Sometimes in the literature, the factor (k2-3K)=(q2-4K) present
in the initial conditions of scalar transfer functions (if
normalized to curvature R=1) is also absorbed in the definition
of the power spectrum. Then the curvature power spectrum reads
calP = (q2-4K)/[q(q2-K)] * (k/k_pivot)^(ns-1)
In CLASS we prefer to define calP = (k/k_pivot)^(ns-1) like in the flat
case, to have the factor (q2-4K) in the initial conditions,
and the factor 1/[q(q2-K)] doesn't need to be there since we
integrate over dk/k.
For tensors, the change of variable described above gives a slightly different result:
dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-3K)] = q2dq * 1/[q(q2-3K)]
But for tensors there are extra curvature-related correction factors to
take into account. See the comments in the perturbation module,
related to initial conditions for tensors.
*/
factor = 4. * _PI_ / k;
if (psp->has_tt == _TRUE_)
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tt]=
primordial_pk[index_ic1_ic2]
* transfer_ic1_temp
* transfer_ic2_temp
* factor;
if (psp->has_ee == _TRUE_)
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ee]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_e]
* transfer_ic2[ptr->index_tt_e]
* factor;
if (psp->has_te == _TRUE_)
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_te]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] +
transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp)
* factor;
if (_tensors_ && (psp->has_bb == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_bb]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_b]
* transfer_ic2[ptr->index_tt_b]
* factor;
if (_scalars_ && (psp->has_pp == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pp]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_lcmb]
* transfer_ic2[ptr->index_tt_lcmb]
* factor;
if (_scalars_ && (psp->has_tp == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tp]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] +
transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp)
* factor;
if (_scalars_ && (psp->has_ep == _TRUE_))
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ep]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] +
transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e])
* factor;
if (_scalars_ && (psp->has_dd == _TRUE_)) {
index_ct=0;
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dd+index_ct]=
primordial_pk[index_ic1_ic2]
* transfer_ic1_nc[index_d1]
* transfer_ic2_nc[index_d2]
* factor;
index_ct++;
}
}
}
if (_scalars_ && (psp->has_td == _TRUE_)) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_td+index_d1]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2_nc[index_d1] +
transfer_ic1_nc[index_d1] * transfer_ic2_temp)
* factor;
}
}
if (_scalars_ && (psp->has_pd == _TRUE_)) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pd+index_d1]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] +
transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb])
* factor;
}
}
if (_scalars_ && (psp->has_ll == _TRUE_)) {
index_ct=0;
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ll+index_ct]=
primordial_pk[index_ic1_ic2]
* transfer_ic1[ptr->index_tt_lensing+index_d1]
* transfer_ic2[ptr->index_tt_lensing+index_d2]
* factor;
index_ct++;
}
}
}
if (_scalars_ && (psp->has_tl == _TRUE_)) {
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tl+index_d1]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing+index_d1] +
transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_temp)
* factor;
}
}
if (_scalars_ && (psp->has_dl == _TRUE_)) {
index_ct=0;
for (index_d1=0; index_d1<psp->d_size; index_d1++) {
for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dl+index_ct]=
primordial_pk[index_ic1_ic2]
* 0.5*(transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lensing+index_d2] +
transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_nc[index_d2])
* factor;
index_ct++;
}
}
}
}
for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
/* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc.) */
if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) ||
(_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) ||
(_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) ||
(_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) ||
(_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) ||
(_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) ||
(_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) ||
(_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) ||
(_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) ||
(_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl))
) {
psp->cl[index_md]
[(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.;
}
/* for non-zero spectra, integrate over q */
else {
/* spline the integrand over the whole range of k's */
class_call(array_spline(cl_integrand,
cl_integrand_num_columns,
ptr->q_size,
0,
1+index_ct,
1+psp->ct_size+index_ct,
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
/* Technical point: we will now do a spline integral over the
whole range of k's, excepted in the closed (K>0) case. In
that case, it is a bad idea to spline over the values of k
corresponding to nu<nu_flat_approximation. In this region, nu
values are integer values, so the steps dq and dk have some
discrete jumps. This makes the spline routine less accurate
than a trapezoidal integral with finer sampling. So, in the
closed case, we set index_q_spline to
ptr->index_q_flat_approximation, to tell the integration
routine that below this index, it should treat the integral
as a trapezoidal one. For testing, one is free to set
index_q_spline to 0, to enforce spline integration
everywhere, or to (ptr->q_size-1), to enforce trapezoidal
integration everywhere. */
if (pba->sgnK == 1) {
index_q_spline = ptr->index_q_flat_approximation;
}
class_call(array_integrate_all_trapzd_or_spline(cl_integrand,
cl_integrand_num_columns,
ptr->q_size,
index_q_spline,
0,
1+index_ct,
1+psp->ct_size+index_ct,
&clvalue,
psp->error_message),
psp->error_message,
psp->error_message);
/* in the closed case, instead of an integral, we have a
discrete sum. In practice, this does not matter: the previous
routine does give a correct approximation of the discrete
sum, both in the trapezoidal and spline regions. The only
error comes from the first point: the previous routine
assumes a weight for the first point which is too small
compared to what it would be in an actual discrete
sum. The line below corrects this problem in an exact way.
*/
if (pba->sgnK == 1) {
clvalue += cl_integrand[1+index_ct] * ptr->q[0]/ptr->k[0][0]*sqrt(pba->K)/2.;
}
/* we have the correct C_l now. We can store it in the transfer structure. */
psp->cl[index_md]
[(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
= clvalue;
}
}
if (ppt->has_cl_number_count == _TRUE_) {
free(transfer_ic1_nc);
free(transfer_ic2_nc);
}
return _SUCCESS_;
}
/**
* This routine computes the values of k and tau at which the matter
* power spectra P(k,tau) and the matter transfer functions T_i(k,tau)
* will be stored.
*
* @param pba Input : pointer to background structure (for z to tau conversion)
* @param ppt Input : pointer to perturbation structure (contain source functions)
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_k_and_tau(
struct background * pba,
struct perturbs * ppt,
struct spectra * psp
) {
/** Summary: */
/** - define local variables */
int index_k;
int index_tau;
double tau_min;
/** - check the presence of scalar modes */
class_test((ppt->has_scalars == _FALSE_),
psp->error_message,
"you cannot ask for matter power spectrum since you turned off scalar modes");
/** - check the maximum redshift z_max_pk at which P(k,z) and T_i(k,z) should be
computable by interpolation. If it is equal to zero, only P(k,z=0)
needs to be computed. If it is higher, we will store in a table
various P(k,tau) at several values of tau generously encompassing
the range 0<z<z_max_pk */
/* if z_max_pk<0, return error */
class_test((psp->z_max_pk < 0),
psp->error_message,
"asked for negative redshift z=%e",psp->z_max_pk);
/* if z_max_pk=0, there is just one value to store */
if (psp->z_max_pk == 0.) {
psp->ln_tau_size=1;
}
/* if z_max_pk>0, store several values (with a comfortable margin above z_max_pk) in view of interpolation */
else {
/* find the first relevant value of tau (the last value in the table tau_sampling before tau(z_max)) and infer the number of values of tau at which P(k) must be stored */
class_call(background_tau_of_z(pba,psp->z_max_pk,&tau_min),
pba->error_message,
psp->error_message);
index_tau=0;
class_test((tau_min < ppt->tau_sampling[index_tau]),
psp->error_message,
"you asked for zmax=%e, i.e. taumin=%e, smaller than first possible value =%e",psp->z_max_pk,tau_min,ppt->tau_sampling[0]);
while (ppt->tau_sampling[index_tau] < tau_min){
index_tau++;
}
index_tau --;
/* whenever possible, take in a few more values, to avoid boundary effects in the interpolation */
if (index_tau>0) index_tau--;
if (index_tau>0) index_tau--;
if (index_tau>0) index_tau--;
if (index_tau>0) index_tau--;
psp->ln_tau_size=ppt->tau_size-index_tau;
}
/** - allocate and fill table of tau values at which P(k,tau) and T_i(k,tau) are stored */
class_alloc(psp->ln_tau,sizeof(double)*psp->ln_tau_size,psp->error_message);
for (index_tau=0; index_tau<psp->ln_tau_size; index_tau++) {
psp->ln_tau[index_tau]=log(ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size]);
}
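/* with this convention psp->ln_tau[index_tau] corresponds to
   ppt->tau_sampling[index_tau - ln_tau_size + tau_size], i.e. the stored
   times are the last ln_tau_size entries of the perturbation module's
   tau grid; the same offset reappears whenever sources are read below */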
/** - allocate and fill table of k values at which P(k,tau) is stored */
psp->ln_k_size = ppt->k_size[ppt->index_md_scalars];
class_alloc(psp->ln_k,sizeof(double)*psp->ln_k_size,psp->error_message);
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
class_test(ppt->k[ppt->index_md_scalars][index_k] <= 0.,
psp->error_message,
"stop to avoid segmentation fault");
psp->ln_k[index_k]=log(ppt->k[ppt->index_md_scalars][index_k]);
}
return _SUCCESS_;
}
/**
* This routine computes a table of values for all matter power spectra P(k),
* given the source functions and primordial spectra.
*
* @param pba Input : pointer to background structure (will provide H, Omega_m at redshift of interest)
* @param ppt Input : pointer to perturbation structure (contain source functions)
* @param ppm Input : pointer to primordial structure
* @param pnl Input : pointer to nonlinear structure (contains non-linear corrections)
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_pk(
struct background * pba,
struct perturbs * ppt,
struct primordial * ppm,
struct nonlinear *pnl,
struct spectra * psp
) {
/** Summary: */
/** - define local variables */
int index_md;
int index_ic1,index_ic2,index_ic1_ic2;
int index_k;
int index_tau;
double * primordial_pk; /* array with argument primordial_pk[index_ic_ic] */
double source_ic1;
double source_ic2;
double ln_pk_tot;
/** - check the presence of scalar modes */
class_test((ppt->has_scalars == _FALSE_),
psp->error_message,
"you cannot ask for matter power spectrum since you turned off scalar modes");
index_md = psp->index_md_scalars;
/** - allocate temporary vector where the primordial spectrum will be stored */
class_alloc(primordial_pk,psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);
/** - allocate and fill array of P(k,tau) values */
class_alloc(psp->ln_pk,
sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],
psp->error_message);
if (pnl->method != nl_none) {
class_alloc(psp->ln_pk_nl,
sizeof(double)*psp->ln_tau_size*psp->ln_k_size,
psp->error_message);
}
else {
psp->ln_pk_nl = NULL;
}
for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
class_call(primordial_spectrum_at_k(ppm,index_md,logarithmic,psp->ln_k[index_k],primordial_pk),
ppm->error_message,
psp->error_message);
ln_pk_tot = 0.;
/* curvature primordial spectrum:
P_R(k) = 1/(2pi^2) k^3 <R R>
so, primordial curvature correlator:
<R R> = (2pi^2) k^-3 P_R(k)
so, delta_m correlator:
P(k) = <delta_m delta_m> = (2pi^2) k^-3 (source_m)^2 P_R(k)
For isocurvature or cross adiabatic-isocurvature parts,
replace one or two 'R' by 'S_i's */
/* part diagonal in initial conditions */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);
source_ic1 = ppt->sources[index_md]
[index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
log(2.*_PI_*_PI_/exp(3.*psp->ln_k[index_k])
*source_ic1*source_ic1
*exp(primordial_pk[index_ic1_ic2]));
ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];
}
/* part non-diagonal in initial conditions */
for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);
if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
source_ic1 = ppt->sources[index_md]
[index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
source_ic2 = ppt->sources[index_md]
[index_ic2 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
primordial_pk[index_ic1_ic2]*SIGN(source_ic1)*SIGN(source_ic2);
ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];
}
else {
psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.;
}
}
}
/* if non-linear corrections are required, compute the total non-linear matter power spectrum: nl_corr_density stores the ratio r of non-linear to linear density fluctuations, so the correction amounts to adding 2 ln r to the log-spectrum */
if (pnl->method != nl_none) {
psp->ln_pk_nl[index_tau * psp->ln_k_size + index_k] =
ln_pk_tot
+ 2.*log(pnl->nl_corr_density[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]);
}
}
}
/**- if interpolation of P(k,tau) will be needed (as a function of tau),
compute array of second derivatives in view of spline interpolation */
if (psp->ln_tau_size > 1) {
class_alloc(psp->ddln_pk,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);
class_call(array_spline_table_lines(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk,
psp->ic_ic_size[index_md]*psp->ln_k_size,
psp->ddln_pk,
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
}
/* compute sigma8 (r.m.s. of matter fluctuations today in a sphere of radius 8/h Mpc) */
class_call(spectra_sigma(pba,ppm,psp,8./pba->h,0.,&(psp->sigma8)),
psp->error_message,
psp->error_message);
if (psp->spectra_verbose>0)
fprintf(stdout," -> sigma8=%g (computed till k = %g h/Mpc)\n",
psp->sigma8,
exp(psp->ln_k[psp->ln_k_size-1])/pba->h);
/**- if interpolation of P_NL(k,tau) will be needed (as a function of tau),
compute array of second derivatives in view of spline interpolation */
if (pnl->method != nl_none) {
if (psp->ln_tau_size > 1) {
class_alloc(psp->ddln_pk_nl,sizeof(double)*psp->ln_tau_size*psp->ln_k_size,psp->error_message);
class_call(array_spline_table_lines(psp->ln_tau,
psp->ln_tau_size,
psp->ln_pk_nl,
psp->ln_k_size,
psp->ddln_pk_nl,
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
}
}
free (primordial_pk);
return _SUCCESS_;
}
/**
* This routine computes sigma(R) given P(k) (does not check that k_max is large
* enough)
*
* @param pba Input: pointer to background structure
* @param ppm Input: pointer to primordial structure
* @param psp Input: pointer to spectra structure
* @param R Input: radius in Mpc
* @param z Input: redshift
* @param sigma Output: r.m.s. fluctuation in a sphere of radius R (dimensionless)
* @return the error status
*/
int spectra_sigma(
struct background * pba,
struct primordial * ppm,
struct spectra * psp,
double R,
double z,
double * sigma
) {
double pk;
double * pk_ic = NULL;
double * array_for_sigma;
int index_num;
int index_k;
int index_y;
int index_ddy;
int i;
double k,W,x;
if (psp->ic_ic_size[psp->index_md_scalars]>1)
class_alloc(pk_ic,
psp->ic_ic_size[psp->index_md_scalars]*sizeof(double),
psp->error_message);
i=0;
index_k=i;
i++;
index_y=i;
i++;
index_ddy=i;
i++;
index_num=i;
class_alloc(array_for_sigma,
psp->ln_k_size*index_num*sizeof(double),
psp->error_message);
for (i=0;i<psp->ln_k_size;i++) {
k=exp(psp->ln_k[i]);
if (i == (psp->ln_k_size-1)) k *= 0.9999999; // to prevent rounding error leading to k being bigger than maximum value
x=k*R;
W=3./x/x/x*(sin(x)-x*cos(x));
class_call(spectra_pk_at_k_and_z(pba,ppm,psp,k,z,&pk,pk_ic),
psp->error_message,
psp->error_message);
array_for_sigma[i*index_num+index_k]=k;
array_for_sigma[i*index_num+index_y]=k*k*pk*W*W;
}
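/* the integrand filled above corresponds to
   sigma^2(R) = 1/(2 pi^2) int dk k^2 P(k) W^2(kR),
   with W(x) = 3 (sin x - x cos x)/x^3 the Fourier transform of a
   spherical top-hat window; the 1/(2 pi^2) factor and the square root
   are applied after the integration below */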
class_call(array_spline(array_for_sigma,
index_num,
psp->ln_k_size,
index_k,
index_y,
index_ddy,
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
class_call(array_integrate_all_spline(array_for_sigma,
index_num,
psp->ln_k_size,
index_k,
index_y,
index_ddy,
sigma,
psp->error_message),
psp->error_message,
psp->error_message);
free(array_for_sigma);
if (psp->ic_ic_size[psp->index_md_scalars]>1)
free(pk_ic);
*sigma = sqrt(*sigma/(2.*_PI_*_PI_));
return _SUCCESS_;
}
/**
* This routine computes a table of values for all matter transfer
* functions T_i(k), given the source functions.
*
* @param pba Input : pointer to background structure (will provide density of each species)
* @param ppt Input : pointer to perturbation structure (contain source functions)
* @param psp Input/Output: pointer to spectra structure
* @return the error status
*/
int spectra_matter_transfers(
struct background * pba,
struct perturbs * ppt,
struct spectra * psp
) {
/** Summary: */
/** - define local variables */
int index_md;
int index_ic;
int index_k;
int index_tau;
int last_index_back;
double * pvecback_sp_long; /* array with argument pvecback_sp_long[pba->index_bg] */
double delta_i,theta_i,rho_i;
double delta_rho_tot,rho_tot;
double rho_plus_p_theta_tot,rho_plus_p_tot;
int n_ncdm;
/** - check the presence of scalar modes */
class_test((ppt->has_scalars == _FALSE_),
psp->error_message,
"you cannot ask for matter power spectrum since you turned off scalar modes");
index_md = psp->index_md_scalars;
/** - allocate and fill array of T_i(k,tau) values */
class_alloc(psp->matter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);
/** - allocate temporary vectors where the background quantities will be stored */
class_alloc(pvecback_sp_long,pba->bg_size*sizeof(double),psp->error_message);
for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {
class_call(background_at_tau(pba,
ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size],
/* for this last argument we could have passed
exp(psp->ln_tau[index_tau]) but we would then lose
precision in the exp(log(x)) operation */
pba->long_info,
pba->inter_normal,
&last_index_back,
pvecback_sp_long),
pba->error_message,
psp->error_message);
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {
delta_rho_tot=0.;
rho_tot=0.;
rho_plus_p_theta_tot=0.;
rho_plus_p_tot=0.;
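/* running sums used to build the total transfer functions:
   delta_tot = sum_i rho_i delta_i / sum_i rho_i   and
   theta_tot = sum_i (rho_i+p_i) theta_i / sum_i (rho_i+p_i),
   with the appropriate (rho+p) weight accumulated for each species below */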
/* T_g(k,tau) */
rho_i = pvecback_sp_long[pba->index_bg_rho_g];
if (ppt->has_source_delta_g == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_g]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_g] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_g == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_g]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_g] = theta_i;
rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
rho_plus_p_tot += 4./3. * rho_i;
}
/* T_b(k,tau) */
rho_i = pvecback_sp_long[pba->index_bg_rho_b];
if (ppt->has_source_delta_b == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_b]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_b] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_b == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_b]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_b] = theta_i;
rho_plus_p_theta_tot += rho_i * theta_i;
rho_plus_p_tot += rho_i;
}
/* T_cdm(k,tau) */
if (pba->has_cdm == _TRUE_) {
rho_i = pvecback_sp_long[pba->index_bg_rho_cdm];
if (ppt->has_source_delta_cdm == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_cdm]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_cdm] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_cdm == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_cdm]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_cdm] = theta_i;
rho_plus_p_theta_tot += rho_i * theta_i;
rho_plus_p_tot += rho_i;
}
}
/* T_dcdm(k,tau) */
if (pba->has_dcdm == _TRUE_) {
rho_i = pvecback_sp_long[pba->index_bg_rho_dcdm];
if (ppt->has_source_delta_dcdm == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dcdm]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dcdm] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_dcdm == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dcdm]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dcdm] = theta_i;
rho_plus_p_theta_tot += rho_i * theta_i;
rho_plus_p_tot += rho_i;
}
}
/* T_scf(k,tau) */
if (pba->has_scf == _TRUE_) {
rho_i = pvecback_sp_long[pba->index_bg_rho_scf];
if (ppt->has_source_delta_scf == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_scf]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_scf] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_scf == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_scf]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_scf] = theta_i;
rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]) * theta_i;
rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]);
}
}
/* T_fld(k,tau) */
if (pba->has_fld == _TRUE_) {
rho_i = pvecback_sp_long[pba->index_bg_rho_fld];
if (ppt->has_source_delta_fld == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_fld]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_fld] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_fld == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_fld]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_fld] = theta_i;
rho_plus_p_theta_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i * theta_i;
rho_plus_p_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i;
}
}
/* T_ur(k,tau) */
if (pba->has_ur == _TRUE_) {
rho_i = pvecback_sp_long[pba->index_bg_rho_ur];
if (ppt->has_source_delta_ur == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ur]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ur] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_ur == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ur]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ur] = theta_i;
rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
rho_plus_p_tot += 4./3. * rho_i;
}
}
/* T_dr(k,tau) */
if (pba->has_dr == _TRUE_) {
rho_i = pvecback_sp_long[pba->index_bg_rho_dr];
if (ppt->has_source_delta_dr == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dr]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dr] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_dr == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dr]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dr] = theta_i;
rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
rho_plus_p_tot += 4./3. * rho_i;
}
}
/* T_ncdm_i(k,tau) */
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
rho_i = pvecback_sp_long[pba->index_bg_rho_ncdm1+n_ncdm];
if (ppt->has_source_delta_ncdm == _TRUE_) {
delta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ncdm1+n_ncdm]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ncdm1+n_ncdm] = delta_i;
delta_rho_tot += rho_i * delta_i;
rho_tot += rho_i;
}
if (ppt->has_source_theta_ncdm == _TRUE_) {
theta_i = ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ncdm1+n_ncdm]
[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ncdm1+n_ncdm] = theta_i;
rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]) * theta_i;
rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]);
}
}
}
/* could include homogeneous component in rho_tot if uncommented (leave commented to match CMBFAST/CAMB definition) */
/* if (pba->has_lambda == _TRUE_) { */
/* rho_i = pvecback_sp_long[pba->index_bg_rho_lambda]; */
/* rho_tot += rho_i; */
/* } */
/* T_tot(k,tau) */
if (ppt->has_density_transfers == _TRUE_) {
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_tot] = delta_rho_tot/rho_tot;
}
if (ppt->has_velocity_transfers == _TRUE_) {
psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_tot] = rho_plus_p_theta_tot/rho_plus_p_tot;
}
}
}
}
/** - if interpolation of T_i(k,tau) will be needed (as a function of tau),
compute array of second derivatives in view of spline interpolation */
if (psp->ln_tau_size > 1) {
class_alloc(psp->ddmatter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);
class_call(array_spline_table_lines(psp->ln_tau,
psp->ln_tau_size,
psp->matter_transfer,
psp->ic_size[index_md]*psp->ln_k_size*psp->tr_size,
psp->ddmatter_transfer,
_SPLINE_EST_DERIV_,
psp->error_message),
psp->error_message,
psp->error_message);
}
free (pvecback_sp_long);
return _SUCCESS_;
}
int spectra_output_tk_titles(struct background *pba,
struct perturbs *ppt,
enum file_format output_format,
char titles[_MAXTITLESTRINGLENGTH_]
){
int n_ncdm;
char tmp[40];
if (output_format == class_format) {
class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);
if (ppt->has_density_transfers == _TRUE_) {
class_store_columntitle(titles,"d_g",_TRUE_);
class_store_columntitle(titles,"d_b",_TRUE_);
class_store_columntitle(titles,"d_cdm",pba->has_cdm);
class_store_columntitle(titles,"d_fld",pba->has_fld);
class_store_columntitle(titles,"d_ur",pba->has_ur);
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
sprintf(tmp,"d_ncdm[%d]",n_ncdm);
class_store_columntitle(titles,tmp,_TRUE_);
}
}
class_store_columntitle(titles,"d_dcdm",pba->has_dcdm);
class_store_columntitle(titles,"d_dr",pba->has_dr);
class_store_columntitle(titles,"d_scf",pba->has_scf);
class_store_columntitle(titles,"d_tot",_TRUE_);
}
if (ppt->has_velocity_transfers == _TRUE_) {
class_store_columntitle(titles,"t_g",_TRUE_);
class_store_columntitle(titles,"t_b",_TRUE_);
class_store_columntitle(titles,"t_cdm",((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous)));
class_store_columntitle(titles,"t_fld",pba->has_fld);
class_store_columntitle(titles,"t_ur",pba->has_ur);
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
sprintf(tmp,"t_ncdm[%d]",n_ncdm);
class_store_columntitle(titles,tmp,_TRUE_);
}
}
class_store_columntitle(titles,"t_dcdm",pba->has_dcdm);
class_store_columntitle(titles,"t_dr",pba->has_dr);
class_store_columntitle(titles,"t__scf",pba->has_scf);
class_store_columntitle(titles,"t_tot",_TRUE_);
}
}
else if (output_format == camb_format) {
class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);
class_store_columntitle(titles,"-T_cdm/k2",_TRUE_);
class_store_columntitle(titles,"-T_b/k2",_TRUE_);
class_store_columntitle(titles,"-T_g/k2",_TRUE_);
class_store_columntitle(titles,"-T_ur/k2",_TRUE_);
class_store_columntitle(titles,"-T_ncdm/k2",_TRUE_);
class_store_columntitle(titles,"-T_tot/k2",_TRUE_);
}
return _SUCCESS_;
}
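/* Schematically, class_store_columntitle appends the title, followed by a
   column delimiter, to the `titles` string when its condition flag is
   true, and does nothing otherwise, so the header matches exactly the
   columns written by class_store_double in spectra_output_tk_data().
   Simplified sketch:

   if (condition == _TRUE_) {
     strcat(titles, title);
     strcat(titles, _DELIMITER_);
   }
*/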
int spectra_output_tk_data(
struct background * pba,
struct perturbs * ppt,
struct spectra * psp,
enum file_format output_format,
double z,
int number_of_titles,
double *data
) {
int n_ncdm;
double k, k_over_h, k2;
double * tkfull=NULL; /* array with argument
tkfull[(index_k * psp->ic_size[index_md] + index_ic)*psp->tr_size+index_tr] */
double *tk;
double *dataptr;
int index_md=0;
int index_ic;
int index_k;
int index_tr;
int storeidx;
if (psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size > 0){
class_alloc(tkfull,
psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size*sizeof(double),
psp->error_message);
}
/** - compute T_i(k) for each k (if several ic's, compute it for each ic). If z_pk = 0, this is done by reading directly in the pre-computed table; if not, by interpolating the table at the correct value of tau. */
/* if z_pk = 0, no interpolation needed */
if (z == 0.) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
for (index_tr=0; index_tr<psp->tr_size; index_tr++) {
for (index_ic=0; index_ic<psp->ic_size[index_md]; index_ic++) {
tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr] = psp->matter_transfer[(((psp->ln_tau_size-1)*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr];
}
}
}
}
/* if 0 < z_pk <= z_max_pk, interpolation is needed */
else {
class_call(spectra_tk_at_z(pba,
psp,
z,
tkfull),
psp->error_message,
psp->error_message);
}
/** - store data */
for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {
for (index_k=0; index_k<psp->ln_k_size; index_k++) {
storeidx = 0;
dataptr = data+index_ic*(psp->ln_k_size*number_of_titles)+index_k*number_of_titles;
tk = &(tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size]);
k = exp(psp->ln_k[index_k]);
k2 = k*k;
k_over_h = k/pba->h;
class_store_double(dataptr, k_over_h, _TRUE_,storeidx);
/* indices for species associated with a velocity transfer function in Fourier space */
if (output_format == class_format) {
if (ppt->has_density_transfers == _TRUE_) {
class_store_double(dataptr,tk[psp->index_tr_delta_g],ppt->has_source_delta_g,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_b],ppt->has_source_delta_b,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_cdm],ppt->has_source_delta_cdm,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_fld],ppt->has_source_delta_fld,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_ur],ppt->has_source_delta_ur,storeidx);
if (pba->has_ncdm == _TRUE_){
for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr,tk[psp->index_tr_delta_ncdm1+n_ncdm],ppt->has_source_delta_ncdm,storeidx);
}
}
class_store_double(dataptr,tk[psp->index_tr_delta_dcdm],ppt->has_source_delta_dcdm,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_dr],ppt->has_source_delta_dr,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_scf],ppt->has_source_delta_scf,storeidx);
class_store_double(dataptr,tk[psp->index_tr_delta_tot],_TRUE_,storeidx);
}
if (ppt->has_velocity_transfers == _TRUE_) {
class_store_double(dataptr,tk[psp->index_tr_theta_g],ppt->has_source_theta_g,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_b],ppt->has_source_theta_b,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_cdm],ppt->has_source_theta_cdm,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_fld],ppt->has_source_theta_fld,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_ur],ppt->has_source_theta_ur,storeidx);
if (pba->has_ncdm == _TRUE_){
for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr,tk[psp->index_tr_theta_ncdm1+n_ncdm],ppt->has_source_theta_ncdm,storeidx);
}
}
class_store_double(dataptr,tk[psp->index_tr_theta_dcdm],ppt->has_source_theta_dcdm,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_dr],ppt->has_source_theta_dr,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_scf],ppt->has_source_theta_scf,storeidx);
class_store_double(dataptr,tk[psp->index_tr_theta_tot],_TRUE_,storeidx);
}
}
else if (output_format == camb_format) {
/* rescale and reorder the matter transfer functions following the CMBFAST/CAMB convention */
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_cdm]/k2,ppt->has_source_delta_cdm,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_b]/k2,ppt->has_source_delta_b,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_g]/k2,ppt->has_source_delta_g,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ur]/k2,ppt->has_source_delta_ur,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ncdm1]/k2,ppt->has_source_delta_ncdm,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_tot]/k2,_TRUE_,storeidx,0.0);
}
}
}
// Necessary because the allocated size could be zero (e.g., if psp->tr_size is zero)
if (tkfull != NULL)
free(tkfull);
return _SUCCESS_;
}
int spectra_firstline_and_ic_suffix(struct perturbs *ppt,
int index_ic,
char first_line[_LINE_LENGTH_MAX_],
FileName ic_suffix){
first_line[0]='\0';
ic_suffix[0]='\0';
if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
strcpy(ic_suffix,"ad");
strcpy(first_line,"for adiabatic (AD) mode (normalized to initial curvature=1) ");
}
if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
strcpy(ic_suffix,"bi");
strcpy(first_line,"for baryon isocurvature (BI) mode (normalized to initial entropy=1)");
}
if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
strcpy(ic_suffix,"cdi");
strcpy(first_line,"for CDM isocurvature (CDI) mode (normalized to initial entropy=1)");
}
if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
strcpy(ic_suffix,"nid");
strcpy(first_line,"for neutrino density isocurvature (NID) mode (normalized to initial entropy=1)");
}
if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
strcpy(ic_suffix,"niv");
strcpy(first_line,"for neutrino velocity isocurvature (NIV) mode (normalized to initial entropy=1)");
}
return _SUCCESS_;
}
|
PSFHandle.h | #pragma once
#include "ps/psf/PSFunc.h"
#include "common/thread_safe_hash_map.h"
#include "param.h"
#include <algorithm>
#include <utility>
#include <mutex>
#include <omp.h>
#include <random>
#include <fstream>
namespace ps {
template <>
class PSHandler<PsfGroup::kParameterServer>
: public PSHandler<PsfGroup::kBaseGroup> {
public:
PSHandler<PsfGroup::kParameterServer>() {
}
PSHandler<PsfGroup::kParameterServer>(
const PSHandler<PsfGroup::kParameterServer> &handle) {
}
void serve(const PSFData<DensePull>::Request &request,
PSFData<DensePull>::Response &response) {
Key k = get<0>(request);
size_t len = get<1>(request);
SArray<float> &pull_vals = get<0>(response);
auto iter = const_store.find(k);
if (iter != const_store.end()) {
auto &value_set_ = *iter->second;
size_t data_size = value_set_.size();
CHECK_EQ(len, data_size) << " size mismatch in DensePull " << k
<< " " << len << " " << data_size;
pull_vals.resize(data_size);
auto read_lock = value_set_.read_guard();
std::copy(value_set_.begin(), value_set_.end(), pull_vals.begin());
} else {
LG << "Key does not exist on PS in DensePull" << k;
}
}
void serve(const PSFData<DensePush>::Request &request,
PSFData<DensePush>::Response &response) {
Key k = get<0>(request);
size_t len = get<1>(request);
SArray<float> vals = get<2>(request);
if (const_store.find(k) == const_store.end()) {
store[k] = std::make_shared<Param<float>>(len, OptType::None,
SArray<float>());
}
auto iter = const_store.find(k);
if (iter != const_store.end()) {
CHECK_EQ(len, iter->second->size())
<< k << " " << len << " " << iter->second->size()
<< " size mismatch in DensePush";
// write, discard const qualifier
auto &value_set_ =
*const_cast<typename tmap::mapped_type &>(iter->second);
auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < value_set_.size(); j++)
value_set_[j] += vals[j];
} else {
LG << "Key does not exist on PS in DensePull" << k;
}
}
void serve(const PSFData<DDPushPull>::Request &request,
PSFData<DDPushPull>::Response &response) {
// one key per request.
// with response result
Key k = get<0>(request);
size_t len = get<1>(request);
SArray<float> vals = get<2>(request);
SArray<float> &pull_vals = get<0>(response);
auto iter = const_store.find(k);
if (iter != const_store.end()) {
auto &value_set_ =
*const_cast<typename tmap::mapped_type &>(iter->second);
size_t data_size = value_set_.size();
CHECK_EQ(len, data_size)
<< " size mismatch in DDPushPull " << len << " " << data_size;
pull_vals.resize(data_size);
auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < data_size; j++) {
value_set_[j] += vals[j];
pull_vals[j] = value_set_[j];
}
} else {
LG << "Key does not exist on PS in DensePull" << k;
}
}
void serve(const PSFData<SparsePull>::Request &request,
PSFData<SparsePull>::Response &response) {
// the length field is reused to carry the row offsets (one row of width values per offset).
// with response result
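// Example (illustrative): width = 4, offset = {2, 7}
// -> pull_vals receives 8 floats: row 2 followed by row 7 of the 2-D parameter.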
Key k = get<0>(request);
SArray<size_t> offset = get<1>(request);
SArray<float> &pull_vals = get<0>(response);
auto iter = const_store.find(k);
if (iter != const_store.end()) {
auto &value_set_ =
*std::dynamic_pointer_cast<Param2D<float>>(iter->second);
size_t width = value_set_.width;
pull_vals.resize(offset.size() * width);
auto read_lock = value_set_.read_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < offset.size(); ++j) {
auto value_begin = value_set_.data() + offset[j] * width;
auto value_end = value_begin + width;
auto dst_begin = pull_vals.data() + j * width;
std::copy(value_begin, value_end, dst_begin);
}
} else {
// error, the key does not exist on PS.
LF << "[Error] The pulled key: " << k
<< " does not exist on PS in SparsePull.";
}
}
void serve(const PSFData<SparsePush>::Request &request,
PSFData<SparsePush>::Response &response) {
// the length field is reused to carry the row offsets (vals holds offsets.size()*width values).
// no response result
Key k = get<0>(request);
SArray<size_t> offsets = get<1>(request);
SArray<float> vals = get<2>(request);
auto iter = const_store.find(k);
if (iter != const_store.end()) {
auto &value_set_ =
*std::dynamic_pointer_cast<Param2D<float>>(iter->second);
size_t width = value_set_.width;
CHECK_EQ(vals.size(), offsets.size() * width)
<< " in Psf::SparsePush check failed,"
<< " size of vals is " << vals.size() << " size of lens is "
<< offsets.size() << " size of width is " << width;
// write, discard const qualifier
auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < offsets.size(); ++j) {
size_t src_offset = j * width;
size_t dst_offset = offsets[j] * width;
for (size_t k = 0; k < width; ++k) {
value_set_[dst_offset + k] += vals[src_offset + k];
}
}
} else {
// error, the key does not exist on PS.
LF << "[Error] The pushed key: " << k
<< " does not exist on PS in SparsePush.";
}
}
void serve(const PSFData<SDPushPull>::Request &request,
PSFData<SDPushPull>::Response &response) {
Key k = get<0>(request);
SArray<size_t> offsets = get<1>(request);
SArray<float> vals = get<2>(request);
size_t len = get<3>(request);
SArray<float> &pull_vals = get<0>(response);
auto iter = const_store.find(k);
if (iter != const_store.end()) {
auto &value_set_ =
*std::dynamic_pointer_cast<Param2D<float>>(iter->second);
size_t width = value_set_.width;
CHECK_EQ(len, value_set_.size())
<< " size mismatch in SDPushPull " << k << " " << len << " "
<< value_set_.size();
// sparsepush phase
if (vals.size() > 0) {
CHECK_EQ(vals.size(), offsets.size() * width)
<< " in Psf::SDPushPull check failed,"
<< " size of vals is " << vals.size() << " size of lens is "
<< offsets.size() << " size of width is " << width;
// write, discard const qualifier
auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < offsets.size(); ++j) {
size_t src_offset = j * width;
size_t dst_offset = offsets[j] * width;
for (size_t k = 0; k < width; ++k) {
value_set_[dst_offset + k] += vals[src_offset + k];
}
}
}
// densepull phase
pull_vals.resize(value_set_.size());
auto read_lock = value_set_.read_guard();
std::copy(value_set_.begin(), value_set_.end(), pull_vals.begin());
} else {
// error, the key does not exist on PS.
LF << "[Error] The pushed key: " << k
<< " does not exist on PS in SDPushPull.";
}
}
void serve(const PSFData<SSPushPull>::Request &request,
PSFData<SSPushPull>::Response &response) {
Key k = get<0>(request);
SArray<size_t> push_offsets = get<1>(request);
SArray<float> vals = get<2>(request);
SArray<size_t> pull_offsets = get<3>(request);
SArray<float> &pull_vals = get<0>(response);
auto iter = const_store.find(k);
if (iter != const_store.end()) {
auto &value_set_ =
*std::dynamic_pointer_cast<Param2D<float>>(iter->second);
size_t width = value_set_.width;
// sparsepush phase
if (vals.size() > 0) {
CHECK_EQ(vals.size(), push_offsets.size() * width)
<< " in Psf::SSPushPull check failed,"
<< " size of vals is " << vals.size() << " size of lens is "
<< push_offsets.size() << " size of width is " << width;
// write, discard const qualifier
auto write_lock = value_set_.write_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < push_offsets.size(); ++j) {
size_t src_offset = j * width;
size_t dst_offset = push_offsets[j] * width;
for (size_t k = 0; k < width; ++k) {
value_set_[dst_offset + k] += vals[src_offset + k];
}
}
}
// sparsepull phase
if (pull_offsets.size() > 0) {
pull_vals.resize(pull_offsets.size() * width);
auto read_lock = value_set_.read_guard();
#pragma omp parallel for num_threads(4)
for (size_t j = 0; j < pull_offsets.size(); ++j) {
auto val_begin =
value_set_.begin() + pull_offsets[j] * width;
auto val_end = val_begin + width;
auto dst_begin = pull_vals.begin() + j * width;
std::copy(val_begin, val_end, dst_begin);
}
}
} else {
// error, the key does not exist on PS.
LF << "[Error] The pushed key: " << k
<< " does not exist on PS in SparsePush.";
}
}
void serve(const PSFData<kSyncEmbedding>::Request &request,
PSFData<kSyncEmbedding>::Response &response);
void serve(const PSFData<kPushEmbedding>::Request &request,
PSFData<kPushEmbedding>::Response &response);
void serve(const PSFData<kPushSyncEmbedding>::Request &request,
PSFData<kPushSyncEmbedding>::Response &response);
void serve(const PSFData<ParamInit>::Request &request,
PSFData<ParamInit>::Response &response) {
// one key per request.
// no response result
Key k = get<0>(request);
ParamType param_type = (ParamType)get<1>(request);
size_t len = get<2>(request);
size_t width = get<3>(request);
InitType init_type = (InitType)get<4>(request);
double init_a = get<5>(request);
double init_b = get<6>(request);
unsigned long long seed = get<7>(request);
OptType otype = (OptType)get<8>(request);
SArray<float> lrs = get<9>(request);
if (!try_init_with_no_conflict(k))
return;
Param<float> *newParam = nullptr;
switch (param_type) {
case kParam:
newParam = new Param<float>(len, otype, lrs);
break;
case kParam2D:
newParam = new Param2D<float>(len, width, otype, lrs);
break;
case kCacheTable:
newParam = new CacheTable<float>(len, width, otype, lrs);
break;
auto iter = store.find(k);
iter->second = tmap::mapped_type(newParam);
CHECK_EQ(len * width, iter->second->size())
<< k << " " << len << " " << width << " " << iter->second->size()
<< " size mismatch in UniformInit";
// write, discard const qualifier
auto &value_set_ =
*const_cast<typename tmap::mapped_type &>(iter->second);
auto write_lock = value_set_.write_guard();
size_t n_threads = (value_set_.size() >> 25) + 1;
if (n_threads > 16)
n_threads = 16;
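// heuristic: roughly one thread per 2^25 (~33.5M) elements, capped at 16 threads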
if (init_type == InitType::Constant) {
float filled_value = static_cast<float>(init_a);
// #pragma omp parallel for num_threads(4)
for (size_t j = 0; j < value_set_.size(); j++)
value_set_[j] = filled_value;
} else if (init_type == InitType::Uniform) {
std::uniform_real_distribution<float> uniform_dist(init_a, init_b);
#pragma omp parallel num_threads(n_threads)
{
size_t rank = omp_get_thread_num();
size_t num_threads = omp_get_num_threads();
std::default_random_engine generator(seed + rank);
size_t length = value_set_.size() / num_threads;
size_t start = rank * length;
size_t ending = start + length;
if (rank == num_threads - 1)
ending = value_set_.size();
for (size_t j = start; j < ending; ++j) {
value_set_[j] = uniform_dist(generator);
}
}
} else if (init_type == InitType::Normal) {
std::normal_distribution<float> normal_dist(init_a, init_b);
#pragma omp parallel num_threads(n_threads)
{
size_t rank = omp_get_thread_num();
size_t num_threads = omp_get_num_threads();
std::default_random_engine generator(seed + rank);
size_t length = value_set_.size() / num_threads;
size_t start = rank * length;
size_t ending = start + length;
if (rank == num_threads - 1)
ending = value_set_.size();
for (size_t j = start; j < ending; ++j) {
value_set_[j] = normal_dist(generator);
}
}
} else if (init_type == InitType::TruncatedNormal) {
std::normal_distribution<float> truncated_normal_dist(init_a,
init_b);
float upper_limit = init_a + 2 * init_b;
float lower_limit = init_a - 2 * init_b;
#pragma omp parallel num_threads(n_threads)
{
size_t rank = omp_get_thread_num();
size_t num_threads = omp_get_num_threads();
std::default_random_engine generator(seed + rank);
size_t length = value_set_.size() / num_threads;
size_t start = rank * length;
size_t ending = start + length;
if (rank == num_threads - 1)
ending = value_set_.size();
for (size_t j = start; j < ending; ++j) {
float temp = truncated_normal_dist(generator);
while (temp > upper_limit || temp < lower_limit)
temp = truncated_normal_dist(generator);
value_set_[j] = temp;
}
}
}
}
void serve(const PSFData<ParamClear>::Request &request,
PSFData<ParamClear>::Response &response) {
Key k = get<0>(request);
auto iter = store.find(k);
if (iter != store.end()) {
store.erase(iter);
} else {
// error, the key does not exist on PS.
LF << "[Error] The pushed key: " << k
<< " does not exist on PS in ParamClear.";
}
}
void serve(const PSFData<ParamSave>::Request &request,
PSFData<ParamSave>::Response &response) {
Key k = get<0>(request);
SArray<char> address = get<1>(request);
auto iter = store.find(k);
if (iter != store.end()) {
auto &value_set_ = *iter->second;
auto read_lock = value_set_.read_guard();
std::ofstream fout(
std::string(address.data(), address.size()).c_str(),
std::ios::binary);
fout.write((char *)value_set_.data(),
value_set_.size() * sizeof(float));
} else {
// error, the key does not exist on PS.
LF << "[Error] The pushed key: " << k
<< " does not exist on PS in ParamSave.";
}
}
void serve(const PSFData<ParamLoad>::Request &request,
PSFData<ParamLoad>::Response &response) {
Key k = get<0>(request);
SArray<char> address = get<1>(request);
auto iter = store.find(k);
if (iter != store.end()) {
auto &value_set_ = *iter->second;
auto write_lock = value_set_.write_guard();
std::ifstream fin(
std::string(address.data(), address.size()).c_str(),
std::ios::binary);
fin.read((char *)value_set_.data(),
value_set_.size() * sizeof(float));
} else {
// error, the key does not exist on PS.
LF << "[Error] The pushed key: " << k
<< " does not exist on PS in ParamLoad.";
}
}
private:
bool try_init_with_no_conflict(Key key) {
static std::mutex init_mtx;
std::lock_guard<std::mutex> lock(init_mtx);
if (store.find(key) != store.end())
return false;
else {
store[key] = tmap::mapped_type();
return true;
}
}
typedef threadsafe_unordered_map<Key, std::shared_ptr<Param<float>>> tmap;
tmap store;
const tmap &const_store =
store; // const reference to force compiler to use read lock
};
} // namespace ps
|
avx512vnni_gemm.h | #pragma once
#include "intgemm/intgemm_config.h"
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512VNNI
#include "avx512_gemm.h"
#include "types.h"
namespace intgemm {
namespace AVX512VNNI {
// Workaround extra vmovdqa64 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94663
INTGEMM_AVX512VNNI static inline void VNNI8(__m512i &c, __m512i a, __m512i b) {
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
asm ("vpdpbusds %2, %1, %0" : "+x"(c) : "x"(a), "mx"(b));
#else
c = _mm512_dpbusds_epi32(c, a, b);
#endif
}
struct Kernels8 : public AVX512BW::Kernels8 {
template <typename Callback>
INTGEMM_AVX512VNNI static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
assert(width % sizeof(Register) == 0);
assert(B_cols % 8 == 0);
assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
const Index simd_width = width / sizeof(Register);
Register zeros = setzero_si<Register>();
// Go over 8 columns of B at a time.
#pragma omp for
for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
// Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once.
for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
// Iterate over shared (inner) dimension.
const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
const Register *A_end = A_live + simd_width;
const Register *B_live = B0_col;
// TODO: separate first step.
Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros, sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
for (; A_live != A_end; ++A_live, B_live += 8) {
Register a = *A_live;
// Retrieve the conveniently consecutive values of B.
Register b0 = *B_live;
Register b1 = *(B_live + 1);
Register b2 = *(B_live + 2);
Register b3 = *(B_live + 3);
Register b4 = *(B_live + 4);
Register b5 = *(B_live + 5);
Register b6 = *(B_live + 6);
Register b7 = *(B_live + 7);
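// vpdpbusds multiplies unsigned bytes by signed bytes, so a*b is computed as
// |a| * (sign(a)*b): make a non-negative and move its sign onto each b register.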
// Get a mask where a is negative.
__mmask64 neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
Register a_positive = _mm512_abs_epi8(a);
// Negate by subtracting from zero with a mask.
b0 = _mm512_mask_sub_epi8(b0, neg_mask, zeros, b0);
b1 = _mm512_mask_sub_epi8(b1, neg_mask, zeros, b1);
b2 = _mm512_mask_sub_epi8(b2, neg_mask, zeros, b2);
b3 = _mm512_mask_sub_epi8(b3, neg_mask, zeros, b3);
b4 = _mm512_mask_sub_epi8(b4, neg_mask, zeros, b4);
b5 = _mm512_mask_sub_epi8(b5, neg_mask, zeros, b5);
b6 = _mm512_mask_sub_epi8(b6, neg_mask, zeros, b6);
b7 = _mm512_mask_sub_epi8(b7, neg_mask, zeros, b7);
VNNI8(sum0, a_positive, b0);
VNNI8(sum1, a_positive, b1);
VNNI8(sum2, a_positive, b2);
VNNI8(sum3, a_positive, b3);
VNNI8(sum4, a_positive, b4);
VNNI8(sum5, a_positive, b5);
VNNI8(sum6, a_positive, b6);
VNNI8(sum7, a_positive, b7);
}
Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
auto total = PermuteSummer(pack0123, pack4567);
callback_impl.Run(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
}
}
}
template <typename Callback>
INTGEMM_AVX512VNNI static void Multiply8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
assert(width % sizeof(Register) == 0);
assert(B_cols % 8 == 0);
assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
const Index simd_width = width / sizeof(Register);
Register zeros = setzero_si<Register>();
// Go over 8 columns of B at a time.
#pragma omp for
for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
// Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once.
for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
// Iterate over shared (inner) dimension.
const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
const Register *A_end = A_live + simd_width;
const Register *B_live = B0_col;
// TODO: separate first step.
Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros, sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
for (; A_live != A_end; ++A_live, B_live += 8) {
Register a = *A_live;
//MultiplyAdd
VNNI8(sum0, a, *B_live);
VNNI8(sum1, a, *(B_live + 1));
VNNI8(sum2, a, *(B_live + 2));
VNNI8(sum3, a, *(B_live + 3));
VNNI8(sum4, a, *(B_live + 4));
VNNI8(sum5, a, *(B_live + 5));
VNNI8(sum6, a, *(B_live + 6));
VNNI8(sum7, a, *(B_live + 7));
}
Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
auto total = PermuteSummer(pack0123, pack4567);
callback_impl.Run(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
}
}
}
template <typename Callback>
INTGEMM_AVX512VNNI static void PrepareBias(const int8_t *B, Index width, Index B_cols, Callback callback) {
assert(width % sizeof(Register) == 0);
assert(B_cols % 8 == 0);
assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
Index simd_width = width / sizeof(Register);
Register zeros = setzero_si<Register>();
const Register a = set1_epi8<Register>(1);
// Go over 8 columns of B at a time.
#pragma omp for
for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
const Register *B_live = B0_col; // kept in the same shape as the Multiply functions above
const Register *B_end = B_live + simd_width*8;
// TODO: separate first step.
Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros, sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
for (; B_live != B_end; B_live += 8) {
// Retrieve the conveniently consecutive values of B.
VNNI8(sum0, a, *B_live);
VNNI8(sum1, a, *(B_live + 1));
VNNI8(sum2, a, *(B_live + 2));
VNNI8(sum3, a, *(B_live + 3));
VNNI8(sum4, a, *(B_live + 4));
VNNI8(sum5, a, *(B_live + 5));
VNNI8(sum6, a, *(B_live + 6));
VNNI8(sum7, a, *(B_live + 7));
}
Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
auto total = PermuteSummer(pack0123, pack4567);
callback_impl.Run(total, callbacks::OutputBufferInfo(0, B0_colidx, 1, B_cols));
}
}
constexpr static const char *const kName = "8-bit AVX512VNNI";
static const CPUType kUses = CPUType::AVX512VNNI;
};
} // namespace AVX512VNNI
} // namespace intgemm
#endif
|
mmparallel.c | /*matrix multiplication parallelization*/
// same code for different scheduling
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <sys/types.h>
#include <memory.h>
#include <malloc.h>
#include <omp.h>
#define SIZE 1000 // for time analysis use 100, 250, 500, 1000, 2000
int main(int argc, char *argv[])
{
float matrixa[SIZE][SIZE], matrixb[SIZE][SIZE], mresult[SIZE][SIZE];
int i, j, k;
/* Initialize the matrix arrays, treating each 2-D array as flat SIZE*SIZE storage */
for (i = 0; i < SIZE * SIZE; i++) {
mresult[0][i] = 0.0;
matrixa[0][i] = matrixb[0][i] = rand() * (float)1.1;
}
// Parallel block start
// To change the scheduling, replace the word "static" with "dynamic" or "guided"; for the chunk size use 1 to 4.
#pragma omp parallel for schedule(static,4) private(i,j,k)
/* Matrix-Matrix multiply */
for (i = 0; i < SIZE; i++)
for (j = 0; j < SIZE; j++)
for (k = 0; k < SIZE; k++)
mresult[i][j] = mresult[i][j] + matrixa[i][k] * matrixb[k][j];
exit(0);
}
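/* For reference, the alternative scheduling clauses described above
   (illustrative variants, not part of the timed run):
   #pragma omp parallel for schedule(dynamic,4) private(i,j,k)
   #pragma omp parallel for schedule(guided,4) private(i,j,k)
*/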
|
diagonalize_matrix_typed.c | #include "bml.h"
#include "../typed.h"
#include "../macros.h"
#include "../C-interface/dense/bml_getters_dense.h"
#include "../C-interface/bml_logger.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#if defined(SINGLE_REAL) || defined(SINGLE_COMPLEX)
#define REL_TOL 1.2e-5
#else
#define REL_TOL 1e-11
#endif
int TYPED_FUNC(
test_diagonalize) (
const int N,
const bml_matrix_type_t matrix_type,
const bml_matrix_precision_t matrix_precision,
const int M)
{
bml_matrix_t *A = NULL;
bml_matrix_t *A_t = NULL;
REAL_T *eigenvalues = NULL;
bml_matrix_t *eigenvectors = NULL;
bml_matrix_t *ct = NULL;
bml_matrix_t *aux = NULL;
bml_matrix_t *aux1 = NULL;
bml_matrix_t *aux2 = NULL;
bml_matrix_t *id = NULL;
float fnorm;
int max_row = MIN(N, PRINT_THRESHOLD);
int max_col = MIN(N, PRINT_THRESHOLD);
LOG_INFO("rel. tolerance = %e\n", REL_TOL);
bml_distribution_mode_t distrib_mode = sequential;
#ifdef DO_MPI
if (bml_getNRanks() > 1)
{
LOG_INFO("Use distributed matrix\n");
distrib_mode = distributed;
}
#endif
A = bml_random_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
//LOG_INFO("A = \n");
//bml_print_bml_matrix(A, 0, max_row, 0, max_col);
A_t = bml_transpose_new(A);
//LOG_INFO("A_t = \n");
//bml_print_bml_matrix(A_t, 0, max_row, 0, max_col);
bml_add(A, A_t, 0.5, 0.5, 0.0);
LOG_INFO("(A + A_t)/2 = \n");
bml_print_bml_matrix(A, 0, N, 0, N);
switch (matrix_precision)
{
case single_real:
eigenvalues = bml_allocate_memory(N * sizeof(float));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
for (int i = 0; i < N; i++)
{
__assume_aligned(eigenvalues, 64);
eigenvalues[i] = 0.0;
}
#endif
break;
case double_real:
eigenvalues = bml_allocate_memory(N * sizeof(double));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
for (int i = 0; i < N; i++)
{
__assume_aligned(eigenvalues, 64);
eigenvalues[i] = 0.0;
}
#endif
break;
case single_complex:
eigenvalues = bml_allocate_memory(N * sizeof(float complex));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
for (int i = 0; i < N; i++)
{
__assume_aligned(eigenvalues, 64);
eigenvalues[i] = 0.0;
}
#endif
break;
case double_complex:
eigenvalues = bml_allocate_memory(N * sizeof(double complex));
#ifdef INTEL_OPT
#pragma omp parallel for simd
#pragma vector aligned
for (int i = 0; i < N; i++)
{
__assume_aligned(eigenvalues, 64);
eigenvalues[i] = 0.0;
}
#endif
break;
default:
LOG_DEBUG("matrix_precision is not set");
break;
}
eigenvectors = bml_zero_matrix(matrix_type, matrix_precision,
N, M, distrib_mode);
aux = bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
aux1 = bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
aux2 = bml_zero_matrix(matrix_type, matrix_precision, N, M, distrib_mode);
bml_diagonalize(A, eigenvalues, eigenvectors);
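// The checks below verify (1) eigenvector orthonormality, C^t*C = Id, and
// (2) the reconstruction C*(D*C^t) = A, each to within N*REL_TOL in Frobenius norm.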
if (bml_getMyRank() == 0)
{
LOG_INFO("%s\n", "eigenvectors");
}
bml_print_bml_matrix(eigenvectors, 0, max_row, 0, max_col);
if (bml_getMyRank() == 0)
{
LOG_INFO("%s\n", "eigenvalues");
for (int i = 0; i < N; i++)
LOG_INFO("val = %e i%e\n", REAL_PART(eigenvalues[i]),
IMAGINARY_PART(eigenvalues[i]));
}
ct = bml_transpose_new(eigenvectors);
if (bml_getMyRank() == 0)
{
LOG_INFO("%s\n", "transpose eigenvectors");
}
bml_print_bml_matrix(ct, 0, max_row, 0, max_col);
bml_multiply(ct, eigenvectors, aux2, 1.0, 0.0, 0.0); // C^t*C
if (bml_getMyRank() == 0)
LOG_INFO("C^t*C matrix:\n");
bml_print_bml_matrix(aux2, 0, max_row, 0, max_col);
REAL_T *aux2_dense = bml_export_to_dense(aux2, dense_row_major);
if (bml_getMyRank() == 0)
{
LOG_INFO("%s\n", "check eigenvectors norms");
for (int i = 0; i < N; i++)
{
REAL_T val = aux2_dense[i + N * i];
if (ABS(val - (REAL_T) 1.0) > REL_TOL)
{
LOG_INFO("i = %d, val = %e i%e\n", i, REAL_PART(val),
IMAGINARY_PART(val));
LOG_ERROR
("Error in matrix diagonalization; eigenvector not normalized\n");
}
}
bml_free_memory(aux2_dense);
}
id = bml_identity_matrix(matrix_type, matrix_precision, N, M,
distrib_mode);
if (bml_getMyRank() == 0)
LOG_INFO("Identity matrix:\n");
bml_print_bml_matrix(id, 0, max_row, 0, max_col);
bml_add(aux2, id, 1.0, -1.0, 0.0);
if (bml_getMyRank() == 0)
LOG_INFO("C^txC^t-Id matrix:\n");
bml_print_bml_matrix(aux2, 0, max_row, 0, max_col);
fnorm = bml_fnorm(aux2);
if (fabsf(fnorm) > N * REL_TOL)
{
LOG_ERROR
("Error in matrix diagonalization; fnorm(C^t*C-Id) = %e\n",
fnorm);
return -1;
}
bml_set_diagonal(aux1, eigenvalues, 0.0);
if (bml_getMyRank() == 0)
LOG_INFO("Matrix after setting diagonal:\n");
bml_print_bml_matrix(aux1, 0, max_row, 0, max_col);
bml_multiply(aux1, ct, aux2, 1.0, 0.0, 0.0); // D*C^t
bml_multiply(eigenvectors, aux2, aux, 1.0, 0.0, 0.0); // C*(D*C^t)
if (bml_getMyRank() == 0)
LOG_INFO("C*(D*C^t) matrix:\n");
bml_print_bml_matrix(aux, 0, max_row, 0, max_col);
bml_add(aux, A, 1.0, -1.0, 0.0);
if (bml_getMyRank() == 0)
LOG_INFO("C*(D*C^t)-A matrix:\n");
bml_print_bml_matrix(aux, 0, max_row, 0, max_col);
fnorm = bml_fnorm(aux);
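// note: (fnorm != fnorm) is true only when fnorm is NaN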
if (fabsf(fnorm) > N * REL_TOL || (fnorm != fnorm))
{
LOG_ERROR
("Error in matrix diagonalization; fnorm(CDC^t-A) = %e\n", fnorm);
return -1;
}
bml_deallocate(&A);
bml_deallocate(&aux);
bml_deallocate(&aux1);
bml_deallocate(&aux2);
bml_deallocate(&ct);
bml_deallocate(&A_t);
bml_deallocate(&eigenvectors);
bml_deallocate(&id);
bml_free_memory(eigenvalues);
LOG_INFO("diagonalize matrix test passed\n");
return 0;
}
|
GB_binop__ne_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fp32)
// A*D function (colscale): GB (_AxD__ne_fp32)
// D*A function (rowscale): GB (_DxB__ne_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fp32)
// C=scalar+B GB (_bind1st__ne_fp32)
// C=scalar+B' GB (_bind1st_tran__ne_fp32)
// C=A+scalar GB (_bind2nd__ne_fp32)
// C=A'+scalar GB (_bind2nd_tran__ne_fp32)
// C type: bool
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_FP32 || GxB_NO_NE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
reduction-classic.c | /*
1. A local copy of the reduction variable is made and initialized depending on the op (e.g., 0 for +).
2. Compiler finds standard reduction expressions containing op and uses them to update the local copy.
3. Local copies are reduced into a single value and combined with the original global value.
*/
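/* A hand-expanded equivalent of the reduction clause used below (illustrative
   sketch only; the compiler's actual transformation may differ):

   #pragma omp parallel
   {
       long local_sum = 0;              // step 1: private copy, identity element for +
       #pragma omp for
       for (int i = 1; i <= upper; i++)
           local_sum += i;              // step 2: reduction updates go to the local copy
       #pragma omp atomic
       sum += local_sum;                // step 3: combine local copies into the global sum
   }
*/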
#include <stdio.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
int main()
{
int i;
long sum=0;
int upper=100;
#pragma omp parallel for reduction(+:sum)
for (i=1; i<= upper; i++){
sum = sum + i;
}
printf("sum from 1 to %d is %ld\n", upper, sum);
assert (sum==5050);
return 0;
}
|
re2ramp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *re2ramp(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *re2ramp(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc;
PyArrayObject *t,*y,*rampparams;
double goal,a,b,m1,m2,t1,t2;
int i;
npy_intp dims[1];
// etc = PyList_New(0);
static char *kwlist[] = {"rampparams","t","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O" \
,kwlist,&rampparams,&t,&etc))
{
return NULL;
}
goal = IND(rampparams,0);
a = IND(rampparams,1);
m1 = IND(rampparams,2);
t1 = IND(rampparams,3);
b = IND(rampparams,4);
m2 = IND(rampparams,5);
t2 = IND(rampparams,6);
dims[0] = t->dimensions[0];
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,NPY_DOUBLE);
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(y,i) = goal-a*exp(-m1*(IND(t,i)-t1))-b*exp(-m2*(IND(t,i)-t2));
}
return PyArray_Return(y);
}
static char module_docstring[]="\
This function creates a model that fits a ramp using two rising exponentials.\n\
\n\
Parameters\n\
----------\n\
goal: goal as x -> inf\n\
a,b: amplitudes of the two exponential terms\n\
m1,m2: exponential rate constants\n\
t1,t2: time offsets\n\
t: Array of time/phase points\n\
\n\
Returns\n\
-------\n\
This function returns an array of y values computed as\n\
y = goal - a*exp(-m1*(t-t1)) - b*exp(-m2*(t-t2))\n\
\n\
Revisions\n\
---------\n\
2010-07-30 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2010-12-24 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";
static PyMethodDef module_methods[] = {
{"re2ramp",(PyCFunction)re2ramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_re2ramp(void)
#else
initre2ramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"re2ramp", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("re2ramp", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
par_nongalerkin.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "../HYPRE.h"
/* This file contains the routines for constructing non-Galerkin coarse grid
* operators, based on the original Galerkin coarse grid
*/
/* Take all of the indices from indices[start, start+1, start+2, ..., end]
* and take the corresponding entries in array and place them in-order in output.
* Assumptions:
* output is of length end-start+1
* indices never contains an index that goes out of bounds in array
* */
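/* Example (illustrative): indices = {4,0,2}, start = 0, end = 2,
 * array = {10,11,12,13,14} => output = {14,10,12} */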
HYPRE_Int
hypre_GrabSubArray(HYPRE_Int * indices,
HYPRE_Int start,
HYPRE_Int end,
HYPRE_BigInt * array,
HYPRE_BigInt * output)
{
HYPRE_Int i, length;
length = end - start + 1;
for(i = 0; i < length; i++)
{ output[i] = array[ indices[start + i] ]; }
return 0;
}
/* Compute the intersection of x and y, placing
* the intersection in z. Additionally, the array
* x_data is associated with x, i.e., the entries
* that we grab from x, we also grab from x_data.
* If x[k] is placed in z[m], then x_data[k] goes to
* output_x_data[m].
*
* Assumptions:
* z is of length min(x_length, y_length)
* x and y are sorted
* x_length and y_length are similar in size, otherwise,
* looping over the smaller array and doing binary search
* in the longer array is faster.
* */
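/* Example (illustrative): x = {1,3,5}, x_data = {0.1,0.3,0.5}, y = {3,4,5}
 * => z = {3,5}, output_x_data = {0.3,0.5}, *intersect_length = 2 */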
HYPRE_Int
hypre_IntersectTwoArrays(HYPRE_Int *x,
HYPRE_Real *x_data,
HYPRE_Int x_length,
HYPRE_Int *y,
HYPRE_Int y_length,
HYPRE_Int *z,
HYPRE_Real *output_x_data,
HYPRE_Int *intersect_length)
{
HYPRE_Int x_index = 0;
HYPRE_Int y_index = 0;
*intersect_length = 0;
/* Compute Intersection, looping over each array */
while ( (x_index < x_length) && (y_index < y_length) )
{
if (x[x_index] > y[y_index])
{
y_index = y_index + 1;
}
else if (x[x_index] < y[y_index])
{
x_index = x_index + 1;
}
else
{
z[*intersect_length] = x[x_index];
output_x_data[*intersect_length] = x_data[x_index];
x_index = x_index + 1;
y_index = y_index + 1;
*intersect_length = *intersect_length + 1;
}
}
return 1;
}
HYPRE_Int
hypre_IntersectTwoBigArrays(HYPRE_BigInt *x,
HYPRE_Real *x_data,
HYPRE_Int x_length,
HYPRE_BigInt *y,
HYPRE_Int y_length,
HYPRE_BigInt *z,
HYPRE_Real *output_x_data,
HYPRE_Int *intersect_length)
{
HYPRE_Int x_index = 0;
HYPRE_Int y_index = 0;
*intersect_length = 0;
/* Compute Intersection, looping over each array */
while ( (x_index < x_length) && (y_index < y_length) )
{
if (x[x_index] > y[y_index])
{
y_index = y_index + 1;
}
else if (x[x_index] < y[y_index])
{
x_index = x_index + 1;
}
else
{
z[*intersect_length] = x[x_index];
output_x_data[*intersect_length] = x_data[x_index];
x_index = x_index + 1;
y_index = y_index + 1;
*intersect_length = *intersect_length + 1;
}
}
return 1;
}
/* Copy CSR matrix A to CSR matrix B. The column indices are
* assumed to be sorted, and the sparsity pattern of B is a subset
* of the sparsity pattern of A.
*
* Assumptions:
* Column indices of A and B are sorted
* Sparsity pattern of B is a subset of A's
* A and B are the same size and have same data layout
**/
HYPRE_Int
hypre_SortedCopyParCSRData(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B)
{
/* Grab off A and B's data structures */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Real *B_diag_data = hypre_CSRMatrixData(B_diag);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Real *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *temp_int_array = NULL;
HYPRE_Int temp_int_array_length=0;
HYPRE_Int i, length, offset_A, offset_B;
for(i = 0; i < num_variables; i++)
{
/* Deal with the first row entries, which may be diagonal elements */
if( A_diag_j[A_diag_i[i]] == i)
{ offset_A = 1; }
else
{ offset_A = 0; }
if( B_diag_j[B_diag_i[i]] == i)
{ offset_B = 1; }
else
{ offset_B = 0; }
if( (offset_B == 1) && (offset_A == 1) )
{ B_diag_data[B_diag_i[i]] = A_diag_data[A_diag_i[i]]; }
/* This finds the intersection of the column indices, and
* also copies the matching data in A to the data array in B
**/
if( (A_diag_i[i+1] - A_diag_i[i] - offset_A) > temp_int_array_length )
{
hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
temp_int_array_length = (A_diag_i[i+1] - A_diag_i[i] - offset_A);
temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
}
hypre_IntersectTwoArrays(&(A_diag_j[A_diag_i[i] + offset_A]),
&(A_diag_data[A_diag_i[i] + offset_A]),
A_diag_i[i+1] - A_diag_i[i] - offset_A,
&(B_diag_j[B_diag_i[i] + offset_B]),
B_diag_i[i+1] - B_diag_i[i] - offset_B,
temp_int_array,
&(B_diag_data[B_diag_i[i] + offset_B]),
&length);
if( (A_offd_i[i+1] - A_offd_i[i]) > temp_int_array_length )
{
hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
temp_int_array_length = (A_offd_i[i+1] - A_offd_i[i]);
temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
}
hypre_IntersectTwoArrays(&(A_offd_j[A_offd_i[i]]),
&(A_offd_data[A_offd_i[i]]),
A_offd_i[i+1] - A_offd_i[i],
&(B_offd_j[B_offd_i[i]]),
B_offd_i[i+1] - B_offd_i[i],
temp_int_array,
&(B_offd_data[B_offd_i[i]]),
&length);
}
if(temp_int_array)
{ hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); }
return 1;
}
/*
* Equivalent to hypre_BoomerAMGCreateS, except, the data array of S
* is not Null and contains the data entries from A.
*/
HYPRE_Int
hypre_BoomerAMG_MyCreateS(hypre_ParCSRMatrix *A,
HYPRE_Real strength_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
hypre_ParCSRMatrix **S_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd = 0;
hypre_ParCSRMatrix *S;
hypre_CSRMatrix *S_diag;
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
hypre_CSRMatrix *S_offd;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_Real *S_offd_data;
HYPRE_Real diag, row_scale, row_sum;
HYPRE_Int i, jA, jS;
HYPRE_Int ierr = 0;
HYPRE_Int *dof_func_offd;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data;
HYPRE_Int index, start, j;
/*--------------------------------------------------------------
* Compute a ParCSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > hypre_max (k != i) aik, aii < 0
* or
* aij < hypre_min (k != i) aik, aii >= 0
* Then S_ij = aij, else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
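/* In the loops below, row_scale is the max (diag < 0) or min (diag >= 0) of the
 * off-diagonal entries of row i, and an entry is marked weak (S_ij dropped)
 * when it fails the strength_threshold * row_scale comparison. */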
num_nonzeros_diag = A_diag_i[num_variables];
num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
A_offd_i = hypre_CSRMatrixI(A_offd);
num_nonzeros_offd = A_offd_i[num_variables];
/* Initialize S */
S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
row_starts, row_starts,
num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
hypre_CSRMatrixData(S_diag) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_diag, HYPRE_MEMORY_HOST);
S_offd = hypre_ParCSRMatrixOffd(S);
hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd_i = hypre_CSRMatrixI(S_offd);
hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
dof_func_offd = NULL;
if (num_cols_offd)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
hypre_CSRMatrixData(S_offd) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_offd, HYPRE_MEMORY_HOST);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
}
/*-------------------------------------------------------------------
* Get the dof_func data for the off-processor columns
*-------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_functions > 1)
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
/* give S same nonzero structure as A */
hypre_ParCSRMatrixCopy(A,S,1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_variables; i++)
{
diag = A_diag_data[A_diag_i[i]];
/* compute scaling factor and row sum */
row_scale = 0.0;
row_sum = diag;
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
/* compute row entries of S */
S_diag_j[A_diag_i[i]] = -1;
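/* The diagonal entry was just marked weak (-1); it is dropped by the
* compression pass below. Next, rows with |row_sum| > max_row_sum*|diag|
* (when max_row_sum < 1.0) are treated as having only weak connections. */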
if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
{
/* make all dependencies weak */
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
S_diag_j[jA] = -1;
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
S_offd_j[jA] = -1;
}
}
else
{
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
}
}
}
/*--------------------------------------------------------------
* "Compress" the strength matrix.
*
* NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
*
* NOTE: This "compression" section of code must not be removed; the
* non-Galerkin routine depends on it.
*----------------------------------------------------------------*/
/* RDF: not sure if able to thread this loop */
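/* The compaction is done in place: jS never runs ahead of jA, so entries
* are only moved left and nothing is overwritten before it is read */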
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (S_diag_j[jA] > -1)
{
S_diag_j[jS] = S_diag_j[jA];
S_diag_data[jS] = S_diag_data[jA];
jS++;
}
}
}
S_diag_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_diag) = jS;
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (S_offd_j[jA] > -1)
{
S_offd_j[jS] = S_offd_j[jA];
S_offd_data[jS] = S_offd_data[jA];
jS++;
}
}
}
S_offd_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_offd) = jS;
hypre_ParCSRMatrixCommPkg(S) = NULL;
*S_ptr = S;
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
return (ierr);
}
/**
* Initialize the IJBuffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBufferInit( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_rowcounter,
HYPRE_Int *ijbuf_numcols )
{
HYPRE_Int ierr = 0;
(*ijbuf_cnt) = 0;
(*ijbuf_rowcounter) = 1; /* Always points to the next row */
ijbuf_numcols[0] = 0;
return ierr;
}
/**
 * Initialize the IJBuffer counters (HYPRE_BigInt buffer variant)
 **/
HYPRE_Int
hypre_NonGalerkinIJBigBufferInit( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_rowcounter,
HYPRE_BigInt *ijbuf_numcols )
{
HYPRE_Int ierr = 0;
(*ijbuf_cnt) = 0;
(*ijbuf_rowcounter) = 1; /* Always points to the next row */
ijbuf_numcols[0] = 0;
return ierr;
}
/**
* Update the buffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBufferNewRow(HYPRE_BigInt *ijbuf_rownums, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_numcols,
HYPRE_Int *ijbuf_rowcounter,
HYPRE_BigInt new_row)
{
HYPRE_Int ierr = 0;
/* First check to see if the previous row was empty, and if so, overwrite that row */
if( ijbuf_numcols[(*ijbuf_rowcounter)-1] == 0 )
{
ijbuf_rownums[(*ijbuf_rowcounter)-1] = new_row;
}
else
{
/* Move to the next row */
ijbuf_rownums[(*ijbuf_rowcounter)] = new_row;
ijbuf_numcols[(*ijbuf_rowcounter)] = 0;
(*ijbuf_rowcounter)++;
}
return ierr;
}
/**
* Compress the current row in an IJ Buffer by removing duplicate entries
**/
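/* Worked example (illustrative): if the current row's (col, data) pairs in
* the buffer are (5, 1.0), (7, 3.0), (5, 2.0), the qsort below orders them
* as (5, 1.0), (5, 2.0), (7, 3.0); the loop then sums duplicates downward,
* leaving (5, 3.0), (7, 3.0) and reducing ijbuf_cnt and the row's
* ijbuf_numcols entry by nduplicate = 1. */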
HYPRE_Int
hypre_NonGalerkinIJBufferCompressRow( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int ijbuf_rowcounter,
HYPRE_Real *ijbuf_data,
HYPRE_BigInt *ijbuf_cols,
HYPRE_BigInt *ijbuf_rownums,
HYPRE_Int *ijbuf_numcols)
{
HYPRE_Int ierr = 0;
HYPRE_Int nentries, i, nduplicate;
/* Compress the current row by removing any repeat entries,
* making sure to decrement ijbuf_cnt by nduplicate */
nentries = ijbuf_numcols[ ijbuf_rowcounter-1 ];
nduplicate = 0;
hypre_BigQsort1(ijbuf_cols, ijbuf_data, (*ijbuf_cnt)-nentries, (*ijbuf_cnt)-1 );
for(i =(*ijbuf_cnt)-nentries+1; i <= (*ijbuf_cnt)-1; i++)
{
if( ijbuf_cols[i] == ijbuf_cols[i-1] )
{
/* Shift duplicate entry down */
nduplicate++;
ijbuf_data[i - nduplicate] += ijbuf_data[i];
}
else if(nduplicate > 0)
{
ijbuf_data[i - nduplicate] = ijbuf_data[i];
ijbuf_cols[i - nduplicate] = ijbuf_cols[i];
}
}
(*ijbuf_cnt) -= nduplicate;
ijbuf_numcols[ ijbuf_rowcounter-1 ] -= nduplicate;
return ierr;
}
/**
* Compress the entire buffer, removing duplicate rows
**/
HYPRE_Int
hypre_NonGalerkinIJBufferCompress( HYPRE_Int ijbuf_size,
HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_rowcounter,
HYPRE_Real **ijbuf_data,
HYPRE_BigInt **ijbuf_cols,
HYPRE_BigInt **ijbuf_rownums,
HYPRE_Int **ijbuf_numcols)
{
HYPRE_Int ierr = 0;
HYPRE_Int *indys = hypre_CTAlloc(HYPRE_Int, (*ijbuf_rowcounter) , HYPRE_MEMORY_HOST);
HYPRE_Int i, j, duplicate, cnt_new, rowcounter_new;
HYPRE_Int row_loc;
HYPRE_BigInt prev_row, row_start, row_stop, row;
HYPRE_Real *data_new;
HYPRE_BigInt *cols_new;
HYPRE_BigInt *rownums_new;
HYPRE_Int *numcols_new;
/* Do a sort on rownums, but store the original order in indys.
* Then see if there are any duplicate rows */
for(i = 0; i < (*ijbuf_rowcounter); i++)
{ indys[i] = i; }
hypre_BigQsortbi((*ijbuf_rownums), indys, 0, (*ijbuf_rowcounter)-1);
duplicate = 0;
for(i = 1; i < (*ijbuf_rowcounter); i++)
{
if(indys[i] != (indys[i-1]+1))
{
duplicate = 1;
break;
}
}
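/* Note: this test flags any reordering produced by the sort, not only true
* duplicate rows; that is conservative, and the compression below remains
* correct when rows merely arrived out of order */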
/* Compress duplicate rows */
if(duplicate)
{
/* Accumulate numcols, so that it functions like a CSR row-pointer */
for(i = 1; i < (*ijbuf_rowcounter); i++)
{ (*ijbuf_numcols)[i] += (*ijbuf_numcols)[i-1]; }
/* Initialize new buffer */
prev_row = -1;
rowcounter_new = 0;
cnt_new = 0;
data_new = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
cols_new = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
rownums_new = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
numcols_new = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
numcols_new[0] = 0;
/* Cycle through each row */
for(i = 0; i < (*ijbuf_rowcounter); i++)
{
/* Find which row this is in local and global numberings, and where
* this row's data starts and stops in the buffer*/
row_loc = indys[i];
row = (*ijbuf_rownums)[i];
if(row_loc > 0)
{
row_start = (*ijbuf_numcols)[row_loc-1];
row_stop = (*ijbuf_numcols)[row_loc];
}
else
{
row_start = 0;
row_stop = (*ijbuf_numcols)[row_loc];
}
/* Is this a new row? If so, compress previous row, and add a new
* one. Noting that prev_row = -1 is a special value */
if(row != prev_row)
{
if(prev_row != -1)
{
/* Compress previous row */
hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
cols_new, rownums_new, numcols_new);
}
prev_row = row;
numcols_new[rowcounter_new] = 0;
rownums_new[rowcounter_new] = row;
rowcounter_new++;
}
/* Copy row into new buffer */
for(j = row_start; j < row_stop; j++)
{
data_new[cnt_new] = (*ijbuf_data)[j];
cols_new[cnt_new] = (*ijbuf_cols)[j];
numcols_new[rowcounter_new-1]++;
cnt_new++;
}
}
/* Compress the final row */
if(i > 1)
{
hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
cols_new, rownums_new, numcols_new);
}
*ijbuf_cnt = cnt_new;
*ijbuf_rowcounter = rowcounter_new;
/* Point to the new buffer */
hypre_TFree(*ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(*ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(*ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(*ijbuf_numcols, HYPRE_MEMORY_DEVICE);
(*ijbuf_data) = data_new;
(*ijbuf_cols) = cols_new;
(*ijbuf_rownums) = rownums_new;
(*ijbuf_numcols) = numcols_new;
}
hypre_TFree(indys, HYPRE_MEMORY_HOST);
return ierr;
}
/**
* Do a buffered write to an IJ matrix.
* That is, write to the buffer, until the buffer is full. Then when the
* buffer is full, write to the IJ matrix and reset the buffer counters
 * In effect, this buffers the operation
* A[row_to_write, col_to_write] += val_to_write
**/
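/*
 * Typical usage sketch (mirroring the call sites later in this file):
 *
 *    hypre_NonGalerkinIJBigBufferInit(&cnt, &rowcounter, cols);
 *    for each (row, col, val) to accumulate:
 *       hypre_NonGalerkinIJBufferWrite(B, &cnt, size, &rowcounter,
 *                                      &data, &cols, &rownums, &numcols,
 *                                      row, col, val);
 *    hypre_NonGalerkinIJBufferEmpty(B, size, &cnt, rowcounter,
 *                                   &data, &cols, &rownums, &numcols);
 *
 * Entries for the same row should be written consecutively; the buffer
 * flushes itself via HYPRE_IJMatrixAddToValues whenever it fills up.
 */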
HYPRE_Int
hypre_NonGalerkinIJBufferWrite( HYPRE_IJMatrix B, /* Unassembled matrix to add an entry to */
HYPRE_Int *ijbuf_cnt, /* current buffer size */
HYPRE_Int ijbuf_size, /* max buffer size */
HYPRE_Int *ijbuf_rowcounter, /* num of rows in rownums, (i.e., size of rownums) */
/* This counter will increase as you call this function for multiple rows */
HYPRE_Real **ijbuf_data, /* Array of values, of size ijbuf_size */
HYPRE_BigInt **ijbuf_cols, /* Array of col indices, of size ijbuf_size */
HYPRE_BigInt **ijbuf_rownums, /* Row indices that, together with numcols, form a CSR-like structure */
HYPRE_Int **ijbuf_numcols, /* rownums[i] is the i-th row number, and numcols[i] is the number of entries being added */
/* for that row. Note numcols is not cumulative like an actual CSR row pointer */
HYPRE_BigInt row_to_write, /* Entry to add to the buffer */
HYPRE_BigInt col_to_write, /* Ditto */
HYPRE_Real val_to_write ) /* Ditto */
{
HYPRE_Int ierr = 0;
if( (*ijbuf_cnt) == 0 )
{
/* brand new buffer: increment buffer structures for the new row */
hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
}
else if((*ijbuf_rownums)[ (*ijbuf_rowcounter)-1 ] != row_to_write)
{
/* If this is a new row, compress the previous row */
hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
(*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
/* increment buffer structures for the new row */
hypre_NonGalerkinIJBufferNewRow( (*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
}
/* Add new entry to buffer */
(*ijbuf_cols)[(*ijbuf_cnt)] = col_to_write;
(*ijbuf_data)[(*ijbuf_cnt)] = val_to_write;
(*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ]++;
(*ijbuf_cnt)++;
/* Buffer is full, write to the matrix object */
if ( (*ijbuf_cnt) == (ijbuf_size-1) )
{
/* If the last row is empty, decrement rowcounter */
if( (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ] == 0)
{ (*ijbuf_rowcounter)--; }
/* Compress and Add Entries */
hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
(*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, ijbuf_rowcounter, ijbuf_data,
ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
ierr += HYPRE_IJMatrixAddToValues(B, *ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
/* Reinitialize the buffer */
hypre_NonGalerkinIJBufferInit( ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_numcols));
hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
}
return ierr;
}
/**
* Empty the IJ Buffer with a final AddToValues.
**/
HYPRE_Int
hypre_NonGalerkinIJBufferEmpty(HYPRE_IJMatrix B, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int ijbuf_size,
HYPRE_Int *ijbuf_cnt,
HYPRE_Int ijbuf_rowcounter,
HYPRE_Real **ijbuf_data,
HYPRE_BigInt **ijbuf_cols,
HYPRE_BigInt **ijbuf_rownums,
HYPRE_Int **ijbuf_numcols)
{
HYPRE_Int ierr = 0;
if( (*ijbuf_cnt) > 0)
{
/* Compress the last row and then write */
hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_data),
(*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, &ijbuf_rowcounter, ijbuf_data,
ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
ierr += HYPRE_IJMatrixAddToValues(B, ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
}
(*ijbuf_cnt) = 0;
return ierr;
}
/*
* Construct sparsity pattern based on R_I A P, plus entries required by drop tolerance
*/
hypre_ParCSRMatrix *
hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP,
hypre_ParCSRMatrix *RAP,
HYPRE_Int * CF_marker,
HYPRE_Real droptol,
HYPRE_Int sym_collapse,
HYPRE_Int collapse_beta )
{
/* MPI Communicator */
MPI_Comm comm = hypre_ParCSRMatrixComm(RAP);
/* Declare R_IAP */
hypre_CSRMatrix *R_IAP_diag = hypre_ParCSRMatrixDiag(R_IAP);
HYPRE_Int *R_IAP_diag_i = hypre_CSRMatrixI(R_IAP_diag);
HYPRE_Int *R_IAP_diag_j = hypre_CSRMatrixJ(R_IAP_diag);
hypre_CSRMatrix *R_IAP_offd = hypre_ParCSRMatrixOffd(R_IAP);
HYPRE_Int *R_IAP_offd_i = hypre_CSRMatrixI(R_IAP_offd);
HYPRE_Int *R_IAP_offd_j = hypre_CSRMatrixJ(R_IAP_offd);
HYPRE_BigInt *col_map_offd_R_IAP = hypre_ParCSRMatrixColMapOffd(R_IAP);
/* Declare RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
HYPRE_BigInt last_col_diag_RAP = first_col_diag_RAP + (HYPRE_BigInt)num_cols_diag_RAP - 1;
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
/* Declare A */
HYPRE_Int num_fine_variables = hypre_CSRMatrixNumRows(R_IAP_diag);
/* Declare IJ matrices */
HYPRE_IJMatrix Pattern;
hypre_ParCSRMatrix *Pattern_CSR = NULL;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Other Declarations */
HYPRE_Int ierr = 0;
HYPRE_Real max_entry = 0.0;
HYPRE_Real max_entry_offd = 0.0;
HYPRE_Int * rownz = NULL;
HYPRE_Int i, j, Cpt;
HYPRE_BigInt row_start, row_end, global_row, global_col;
/* Other Setup */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
/*
* Initialize the IJ matrix, leveraging our rough knowledge of the
* nonzero structure of Pattern based on RAP
*
* ilower, iupper, jlower, jupper */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP, last_col_diag_RAP, &Pattern);
ierr += HYPRE_IJMatrixSetObjectType(Pattern, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for(i = 0; i < num_variables; i++)
{ rownz[i] = 1.2*(RAP_diag_i[i+1] - RAP_diag_i[i]) + 1.2*(RAP_offd_i[i+1] - RAP_offd_i[i]); }
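/* the 1.2 factor leaves roughly 20% slack over RAP's row counts,
* presumably for the extra entries added by the drop-tolerance pass
* below (the result is truncated to HYPRE_Int) */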
HYPRE_IJMatrixSetRowSizes(Pattern, rownz);
ierr += HYPRE_IJMatrixInitialize(Pattern);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
 * For efficiency, we do a buffered IJAddToValues.
 * Here, we allocate the buffers and then initialize their counters
 */
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if(sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Place entries in R_IAP into Pattern
*/
Cpt = -1; /* Cpt contains the fine grid index of the i-th Cpt */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
/* Find the next Coarse Point in CF_marker */
for(j = Cpt+1; j < num_fine_variables; j++)
{
if(CF_marker[j] == 1) /* Found Next C-point */
{
Cpt = j;
break;
}
}
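/* Cpt is now the fine-grid index of coarse dof i: R_IAP rows are indexed
* on the fine grid, so row Cpt of R_IAP supplies the pattern entries for
* coarse row i of Pattern */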
/* Diag Portion */
row_start = R_IAP_diag_i[Cpt];
row_end = R_IAP_diag_i[Cpt+1];
for(j = row_start; j < row_end; j++)
{
global_col = R_IAP_diag_j[j] + first_col_diag_RAP;
/* This call adds a single 1 x 1 entry (global_row, global_col, 1.0) to the IJ buffer */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0);
}
}
/* Offdiag Portion */
row_start = R_IAP_offd_i[Cpt];
row_end = R_IAP_offd_i[Cpt+1];
for(j = row_start; j < row_end; j++)
{
global_col = col_map_offd_R_IAP[ R_IAP_offd_j[j] ];
/* This call adds a single 1 x 1 entry (global_row, global_col, 1.0) to the IJ buffer */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0);
}
}
}
/*
* Use drop-tolerance to compute new entries for sparsity pattern
*/
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE
#endif */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
/* Compute the drop tolerance for this row, which is just
* abs(max of row i)*droptol */
max_entry = -1.0;
for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
{
if( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )
{ max_entry = fabs(RAP_diag_data[j]); }
}
for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
{
if( max_entry < fabs(RAP_offd_data[j]) )
{ max_entry = fabs(RAP_offd_data[j]); }
}
max_entry *= droptol;
max_entry_offd = max_entry*collapse_beta;
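/* entries in the offd part are compared against the scaled tolerance
* droptol * collapse_beta * (max entry of row i) */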
/* Loop over diag portion, adding all entries that are "strong" */
for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
{
if( fabs(RAP_diag_data[j]) > max_entry )
{
global_col = RAP_diag_j[j] + first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues
* A[global_row, global_col] += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0 );
if(sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0 );
}
/*}*/
}
}
/* Loop over offd portion, adding all entries that are "strong" */
for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
{
if( fabs(RAP_offd_data[j]) > max_entry_offd )
{
global_col = col_map_offd_RAP[ RAP_offd_j[j] ];
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues
* A[global_row, global_col] += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0 );
if(sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0 );
}
/*}*/
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if(sym_collapse)
hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Finalize Construction of Pattern */
ierr += HYPRE_IJMatrixAssemble(Pattern);
ierr += HYPRE_IJMatrixGetObject( Pattern, (void**) &Pattern_CSR );
/* Deallocate */
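/* Setting the object type to -1 before the destroy is the hypre idiom for
* releasing the IJ wrapper without freeing the underlying ParCSR object,
* which is returned to the caller through Pattern_CSR */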
ierr += HYPRE_IJMatrixSetObjectType(Pattern, -1);
ierr += HYPRE_IJMatrixDestroy(Pattern);
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if(sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
return Pattern_CSR;
}
HYPRE_Int
hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr,
hypre_ParCSRMatrix *AP,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int * dof_func_value,
HYPRE_Int * CF_marker,
HYPRE_Real droptol, HYPRE_Int sym_collapse,
HYPRE_Real lump_percent, HYPRE_Int collapse_beta )
{
/* Initializations */
MPI_Comm comm = hypre_ParCSRMatrixComm(*RAP_ptr);
hypre_ParCSRMatrix *S = NULL;
hypre_ParCSRMatrix *RAP = *RAP_ptr;
HYPRE_Int i, j, k, row_start, row_end, num_cols_offd_Sext, num_procs;
HYPRE_Int S_ext_diag_size, S_ext_offd_size, cnt_offd, cnt_diag, cnt;
HYPRE_BigInt last_col_diag_RAP, value;
HYPRE_Int current_Pattern_j;
HYPRE_BigInt col_indx_Pattern, col_indx_RAP;
/* HYPRE_Real start_time = hypre_MPI_Wtime(); */
/* HYPRE_Real end_time; */
HYPRE_BigInt *temp = NULL;
HYPRE_Int ierr = 0;
char filename[256];
/* Lumping related variables */
HYPRE_IJMatrix ijmatrix;
HYPRE_BigInt * Pattern_offd_indices = NULL;
HYPRE_BigInt * S_offd_indices = NULL;
HYPRE_BigInt * offd_intersection = NULL;
HYPRE_Real * offd_intersection_data = NULL;
HYPRE_Int * diag_intersection = NULL;
HYPRE_Real * diag_intersection_data = NULL;
HYPRE_Int Pattern_offd_indices_len = 0;
HYPRE_Int Pattern_offd_indices_allocated_len= 0;
HYPRE_Int S_offd_indices_len = 0;
HYPRE_Int S_offd_indices_allocated_len = 0;
HYPRE_Int offd_intersection_len = 0;
HYPRE_Int offd_intersection_allocated_len = 0;
HYPRE_Int diag_intersection_len = 0;
HYPRE_Int diag_intersection_allocated_len = 0;
HYPRE_Int intersection_len = 0;
HYPRE_Int * Pattern_indices_ptr = NULL;
HYPRE_Int Pattern_diag_indices_len = 0;
HYPRE_BigInt global_row = 0;
HYPRE_Int has_row_ended = 0;
HYPRE_Real lump_value = 0.;
HYPRE_Real diagonal_lump_value = 0.;
HYPRE_Real neg_lump_value = 0.;
HYPRE_Real sum_strong_neigh = 0.;
HYPRE_Int * rownz = NULL;
/* offd and diag portions of RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(RAP);
/* offd and diag portions of S */
hypre_CSRMatrix *S_diag = NULL;
HYPRE_Int *S_diag_i = NULL;
HYPRE_Real *S_diag_data = NULL;
HYPRE_Int *S_diag_j = NULL;
hypre_CSRMatrix *S_offd = NULL;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Real *S_offd_data = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd_S = NULL;
HYPRE_Int num_cols_offd_S;
/* HYPRE_Int num_nonzeros_S_diag; */
/* off processor portions of S */
hypre_CSRMatrix *S_ext = NULL;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Real *S_ext_data = NULL;
HYPRE_BigInt *S_ext_j = NULL;
HYPRE_Int *S_ext_diag_i = NULL;
HYPRE_Real *S_ext_diag_data = NULL;
HYPRE_Int *S_ext_diag_j = NULL;
HYPRE_Int *S_ext_offd_i = NULL;
HYPRE_Real *S_ext_offd_data = NULL;
HYPRE_Int *S_ext_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Sext = NULL;
/* HYPRE_Int num_nonzeros_S_ext_diag;
HYPRE_Int num_nonzeros_S_ext_offd;
HYPRE_Int num_rows_Sext = 0; */
HYPRE_Int row_indx_Sext = 0;
/* offd and diag portions of Pattern */
hypre_ParCSRMatrix *Pattern = NULL;
hypre_CSRMatrix *Pattern_diag = NULL;
HYPRE_Int *Pattern_diag_i = NULL;
HYPRE_Real *Pattern_diag_data = NULL;
HYPRE_Int *Pattern_diag_j = NULL;
hypre_CSRMatrix *Pattern_offd = NULL;
HYPRE_Int *Pattern_offd_i = NULL;
HYPRE_Real *Pattern_offd_data = NULL;
HYPRE_Int *Pattern_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pattern = NULL;
HYPRE_Int num_cols_Pattern_offd;
HYPRE_Int my_id;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Further Initializations */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Compute Sparsity Pattern */
Pattern = hypre_NonGalerkinSparsityPattern(AP, RAP, CF_marker, droptol, sym_collapse, collapse_beta);
Pattern_diag = hypre_ParCSRMatrixDiag(Pattern);
Pattern_diag_i = hypre_CSRMatrixI(Pattern_diag);
Pattern_diag_data = hypre_CSRMatrixData(Pattern_diag);
Pattern_diag_j = hypre_CSRMatrixJ(Pattern_diag);
Pattern_offd = hypre_ParCSRMatrixOffd(Pattern);
Pattern_offd_i = hypre_CSRMatrixI(Pattern_offd);
Pattern_offd_j = hypre_CSRMatrixJ(Pattern_offd);
col_map_offd_Pattern = hypre_ParCSRMatrixColMapOffd(Pattern);
num_cols_Pattern_offd = hypre_CSRMatrixNumCols(Pattern_offd);
if (num_cols_Pattern_offd)
{ Pattern_offd_data = hypre_CSRMatrixData(Pattern_offd); }
/**
* Fill in the entries of Pattern with entries from RAP
**/
/* First, sort column indices in RAP and Pattern */
for(i = 0; i < num_variables; i++)
{
/* The diag matrices store the diagonal as first element in each row.
* We maintain that for the case of Pattern and RAP, because the
* strength of connection routine relies on it and we need to ignore
* diagonal entries in Pattern later during set intersections.
* */
/* Sort diag portion of RAP */
row_start = RAP_diag_i[i];
if( RAP_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = RAP_diag_i[i+1];
hypre_qsort1(RAP_diag_j, RAP_diag_data, row_start, row_end-1 );
/* Sort diag portion of Pattern */
row_start = Pattern_diag_i[i];
if( Pattern_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort offd portion of RAP */
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
hypre_qsort1(RAP_offd_j, RAP_offd_data, row_start, row_end-1 );
/* Sort offd portion of Pattern */
/* Be careful to map coarse dof i with CF_marker into Pattern */
row_start = Pattern_offd_i[i];
row_end = Pattern_offd_i[i+1];
hypre_qsort1(Pattern_offd_j, Pattern_offd_data, row_start, row_end-1 );
}
/* Create Strength matrix based on RAP or Pattern. If Pattern is used,
* then the SortedCopyParCSRData(...) function call must also be commented
* back in */
/* hypre_SortedCopyParCSRData(RAP, Pattern); */
if(0)
{
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum, */
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
num_functions, dof_func_value, &S);
}
else
{
/* Passing in "1, NULL" because dof_func is not needed
 * when the number of functions is 1 */
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum,*/
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
1, NULL, &S);
}
/* Grab diag and offd parts of S */
S_diag = hypre_ParCSRMatrixDiag(S);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
S_offd_i = hypre_CSRMatrixI(S_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
/* num_nonzeros_S_diag = S_diag_i[num_variables]; */
/* Grab part of S that is distance one away from the local rows
* This is needed later for the stencil collapsing. This section
* of the code mimics par_rap.c when it extracts Ps_ext.
* When moving from par_rap.c, the variable name changes were:
* A --> RAP
* P --> S
* Ps_ext --> S_ext
* P_ext_diag --> S_ext_diag
* P_ext_offd --> S_ext_offd
*
* The data layout of S_ext as returned by ExtractBExt gives you only global
* column indices, and must be converted to the local numbering. This code
* section constructs S_ext_diag and S_ext_offd, which are the distance 1
* couplings in S based on the sparsity structure in RAP.
* --> S_ext_diag corresponds to the same column slice that RAP_diag
* corresponds to. Thus, the column indexing is the same as in
* RAP_diag such that S_ext_diag_j[k] just needs to be offset by
* the RAP_diag first global dof offset.
* --> S_ext_offd column indexing is a little more complicated, and
* requires the computation below of col_map_S_ext_offd, which
* maps the local 0,1,2,... column indexing in S_ext_offd to global
* dof numbers. Note, that the num_cols_RAP_offd is NOT equal to
* num_cols_offd_S_ext
* --> The row indexing of S_ext_diag|offd is as follows. Use
* col_map_offd_RAP, where the first index corresponds to the
* first global row index in S_ext_diag|offd. Remember that ExtractBExt
* grabs the information from S required for locally computing
* (RAP*S)[proc_k row slice, :] */
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,RAP,1);
S_ext_data = hypre_CSRMatrixData(S_ext);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
}
/* This uses the num_cols_RAP_offd to set S_ext_diag|offd_i, because S_ext
* is the off-processor information needed to compute RAP*S. That is,
* num_cols_RAP_offd represents the number of rows needed from S_ext for
* the multiplication */
S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST);
S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST);
S_ext_diag_size = 0;
S_ext_offd_size = 0;
/* num_rows_Sext = num_cols_RAP_offd; */
last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1;
/* Construct the S_ext_diag and S_ext_offd row-pointer arrays by counting
 * elements. S_ext is split into diag and offd blocks relative to this
 * processor's column slice, which makes the binary searches and set
 * intersections later on (the bottleneck of this routine) faster. */
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
S_ext_offd_size++;
else
S_ext_diag_size++;
S_ext_diag_i[i+1] = S_ext_diag_size;
S_ext_offd_i[i+1] = S_ext_offd_size;
}
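/* The pass above only counted entries per row (building the row pointers);
* the pass below fills the j and data arrays, the usual two-pass CSR
* construction */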
if (S_ext_diag_size)
{
S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST);
S_ext_diag_data = hypre_CTAlloc(HYPRE_Real, S_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST);
S_ext_offd_data = hypre_CTAlloc(HYPRE_Real, S_ext_offd_size, HYPRE_MEMORY_HOST);
}
/* This copies the column indices into the offd and diag parts.
 * The diag portion has its local column indices shifted to start at 0.
 * The offd portion requires more work to construct the col_map_offd array
 * and a local column ordering. */
cnt_offd = 0;
cnt_diag = 0;
cnt = 0;
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
{
S_ext_offd_data[cnt_offd] = S_ext_data[j];
//S_ext_offd_j[cnt_offd++] = S_ext_j[j];
S_ext_j[cnt_offd++] = S_ext_j[j];
}
else
{
S_ext_diag_data[cnt_diag] = S_ext_data[j];
S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(S_ext_j[j] - first_col_diag_RAP);
}
}
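/* S_ext_j now holds, compacted into its first S_ext_offd_size slots, the
* global offd column values; these are mapped to local indices against
* col_map_offd_Sext via hypre_BigBinarySearch below */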
/* This creates col_map_offd_Sext */
if (S_ext_offd_size || num_cols_offd_S)
{
temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_cols_offd_S, HYPRE_MEMORY_HOST);
for (i=0; i < S_ext_offd_size; i++)
temp[i] = S_ext_j[i];
cnt = S_ext_offd_size;
for (i=0; i < num_cols_offd_S; i++)
temp[cnt++] = col_map_offd_S[i];
}
if (cnt)
{
/* after this, the first so many entries of temp will hold the
* unique column indices in S_ext_offd_j unioned with the indices
* in col_map_offd_S */
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_Sext = 1;
value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Sext++] = value;
}
}
}
else
{
num_cols_offd_Sext = 0;
}
/* num_nonzeros_S_ext_diag = cnt_diag;
num_nonzeros_S_ext_offd = S_ext_offd_size; */
if (num_cols_offd_Sext)
col_map_offd_Sext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Sext, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_Sext; i++)
col_map_offd_Sext[i] = temp[i];
if (S_ext_offd_size || num_cols_offd_S)
hypre_TFree(temp, HYPRE_MEMORY_HOST);
/* look for S_ext_offd_j[i] in col_map_offd_Sext, and set S_ext_offd_j[i]
* to the index of that column value in col_map_offd_Sext */
for (i=0 ; i < S_ext_offd_size; i++)
S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Sext,
S_ext_j[i],
num_cols_offd_Sext);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(S_ext);
S_ext = NULL;
}
/* Need to sort column indices in S and S_ext */
for(i = 0; i < num_variables; i++)
{
/* Re-Sort diag portion of Pattern, placing the diagonal entry in a
* sorted position */
row_start = Pattern_diag_i[i];
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort diag portion of S; note that S stores no diagonal entry, and that
 * (unlike hypre_BoomerAMGCreateS) S carries a data array here, filled by
 * hypre_BoomerAMG_MyCreateS, so the data is sorted along with the indices */
row_start = S_diag_i[i];
row_end = S_diag_i[i+1];
hypre_qsort1(S_diag_j, S_diag_data, row_start, row_end-1 );
/* Sort offd portion of S; as above, S carries a data array here */
row_start = S_offd_i[i];
row_end = S_offd_i[i+1];
hypre_qsort1(S_offd_j, S_offd_data, row_start, row_end-1 );
}
/* Sort S_ext
* num_cols_RAP_offd equals num_rows for S_ext*/
for(i = 0; i < num_cols_RAP_offd; i++)
{
/* Sort diag portion of S_ext */
row_start = S_ext_diag_i[i];
row_end = S_ext_diag_i[i+1];
hypre_qsort1(S_ext_diag_j, S_ext_diag_data, row_start, row_end-1 );
/* Sort offd portion of S_ext */
row_start = S_ext_offd_i[i];
row_end = S_ext_offd_i[i+1];
hypre_qsort1(S_ext_offd_j, S_ext_offd_data, row_start, row_end-1 );
}
/*
* Now, for the fun stuff -- Computing the Non-Galerkin Operator
*/
/* Initialize the ijmatrix, leveraging our knowledge of the nonzero
* structure in Pattern */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP,
first_col_diag_RAP, last_col_diag_RAP, &ijmatrix);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for(i = 0; i < num_variables; i++)
{ rownz[i] = 1.2*(Pattern_diag_i[i+1] - Pattern_diag_i[i]) + 1.2*(Pattern_offd_i[i+1] - Pattern_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(ijmatrix, rownz);
ierr += HYPRE_IJMatrixInitialize(ijmatrix);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
 * For efficiency, we do a buffered IJAddToValues.
 * Here, we allocate the buffers and then initialize their counters
 */
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if(sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Eliminate Entries In RAP_diag
* */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_diag_i[i];
row_end = RAP_diag_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
/* Grab pointer to current entry in Pattern_diag */
current_Pattern_j = Pattern_diag_i[i];
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping */
/* Ensure adequate length */
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if(Pattern_offd_indices_allocated_len < Pattern_offd_indices_len)
{
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
Pattern_offd_indices = hypre_CTAlloc(HYPRE_BigInt, Pattern_offd_indices_len, HYPRE_MEMORY_HOST);
Pattern_offd_indices_allocated_len = Pattern_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of Pattern_offd_j */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
col_indx_RAP = RAP_diag_j[j];
/* Ignore zero entries in RAP */
if( RAP_diag_data[j] != 0.0)
{
/* Don't change the diagonal, just write it */
if(col_indx_RAP == i)
{
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, global_row] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, RAP_diag_data[j] );
/*}*/
}
/* The entry in RAP does not appear in Pattern, so LUMP it */
else if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* Lump entry (i, col_indx_RAP) in RAP */
/* Grab the indices for row col_indx_RAP of S_offd and diag. This will
* be for computing lumping locations */
S_offd_indices_len = S_offd_i[col_indx_RAP+1] - S_offd_i[col_indx_RAP];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_offd_j */
hypre_GrabSubArray(S_offd_j, S_offd_i[col_indx_RAP], S_offd_i[col_indx_RAP+1]-1,
col_map_offd_S, S_offd_indices);
/* No need to grab info out of S_diag_j[...], here we just start from
* S_diag_i[col_indx_RAP] and end at index S_diag_i[col_indx_RAP+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
/* This intersection also tracks S_offd_data and assumes that
* S_offd_indices is the first argument here */
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_offd_data[ S_offd_i[col_indx_RAP] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. Note that S_diag_j does
* not have a diagonal entry, so no lumping occurs to the diagonal. */
cnt = hypre_max(Pattern_diag_indices_len,
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
/* There is no diagonal entry in first position of S */
hypre_IntersectTwoArrays( &(S_diag_j[S_diag_i[col_indx_RAP]]),
&(S_diag_data[ S_diag_i[col_indx_RAP] ]),
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_diag_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
 * col_indx_RAP in S, corresponding to the indices we are
 * collapsing to in row i. This will give us our collapsing
 * weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_diag_data[j]/sum_strong_neigh;
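/* Each intersection entry k now receives the lumped value
* lump_percent * RAP_diag_data[j] * |s_k| / (sum_m |s_m|), where s are the
* strength entries gathered above; when lump_percent < 1.0, the remainder
* is added to the diagonal below so the row sum is preserved. */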
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, cnt] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (lump_percent < 1.0)
{
/* Preserve row sum by updating diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if(sym_collapse)
{
/* Update mirror entry */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
/* Update mirror entry diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
/*}*/
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_diag_data[j]; }
else
{ lump_value = RAP_diag_data[j]; }
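/* with symmetric collapsing the value was halved above, so that this entry
* and its mirror (written next) together account for RAP_diag_data[j] */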
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if(col_indx_RAP == col_indx_Pattern)
{
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, RAP_diag_data[j] );
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_diag_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iteration for the
 * current col_indx_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_diag_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
/*
* Eliminate Entries In RAP_offd
* Structure of this for-loop is very similar to the RAP_diag for-loop
 * But, not so similar that these loops should be combined into a single function.
* */
if(num_cols_RAP_offd)
{
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
current_Pattern_j = Pattern_offd_i[i];
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if( (Pattern_offd_j != NULL) && (Pattern_offd_indices_len > 0) )
{ col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; }
else
{ /* If Pattern_offd_j is not allocated or this is a zero-length row,
then all entries need to be lumped.
This is analogous to the has_row_ended=1 case. */
col_indx_Pattern = -1;
has_row_ended = 1;
}
/* Grab this row's indices out of Pattern offd and diag. This will
 * be for computing index set intersections for lumping. The loop
 * over RAP_diag above already ensured adequate length of
 * Pattern_offd_indices */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
/* Ignore zero entries in RAP */
if( RAP_offd_data[j] != 0.0)
{
/* In general for all the offd_j arrays, we have to indirectly
* index with the col_map_offd array to get a global index */
col_indx_RAP = col_map_offd_RAP[ RAP_offd_j[j] ];
/* The entry in RAP does not appear in Pattern, so LUMP it */
if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* The row_indx_Sext could be found with
row_indx_Sext = hypre_BigBinarySearch(col_map_offd_RAP, col_indx_RAP, num_cols_RAP_offd);
but we already know the answer: */
row_indx_Sext = RAP_offd_j[j];
/* Grab the indices for row row_indx_Sext from the offd and diag parts. This will
* be for computing lumping locations */
S_offd_indices_len = S_ext_offd_i[row_indx_Sext+1] - S_ext_offd_i[row_indx_Sext];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_ext_offd_j */
hypre_GrabSubArray(S_ext_offd_j, S_ext_offd_i[row_indx_Sext], S_ext_offd_i[row_indx_Sext+1]-1,
col_map_offd_Sext, S_offd_indices);
/* No need to grab info out of S_ext_diag_j[...], here we just start from
* S_ext_diag_i[row_indx_Sext] and end at index S_ext_diag_i[row_indx_Sext+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_ext_offd_data[ S_ext_offd_i[row_indx_Sext] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. */
cnt = hypre_max(Pattern_diag_indices_len,
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
hypre_IntersectTwoArrays( &(S_ext_diag_j[S_ext_diag_i[row_indx_Sext]]),
&(S_ext_diag_data[ S_ext_diag_i[row_indx_Sext] ]),
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_offd_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* row_indx_Sext in S, corresponding to the indices we are
* collapsing to in row i. This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_offd_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value);
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_offd_data[j]; }
else
{ lump_value = RAP_offd_data[j]; }
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, col_indx_RAP, global_row,
lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if (col_indx_RAP == col_indx_Pattern)
{
/* For the offd structure, col_indx_RAP is a global dof number */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
RAP_offd_data[j]);
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_offd_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_offd_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
}
/* IJAddToValues calls are buffered for efficiency; this call flushes
 * any values still remaining in the buffer. */
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if(sym_collapse)
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Assemble the non-Galerkin matrix and overwrite the current RAP */
ierr += HYPRE_IJMatrixAssemble (ijmatrix);
ierr += HYPRE_IJMatrixGetObject( ijmatrix, (void**) RAP_ptr);
/* Optional diagnostic matrix printing */
if (0)
{
hypre_sprintf(filename, "Pattern_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(Pattern, 0, 0, filename);
hypre_sprintf(filename, "Strength_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(S, 0, 0, filename);
hypre_sprintf(filename, "RAP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(RAP, 0, 0, filename);
hypre_sprintf(filename, "RAPc_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(*RAP_ptr, 0, 0, filename);
hypre_sprintf(filename, "AP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(AP, 0, 0, filename);
}
/* Free matrices and variables and arrays */
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if(sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
if (S_ext_diag_size)
{
hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_data, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Sext)
{ hypre_TFree(col_map_offd_Sext, HYPRE_MEMORY_HOST); }
ierr += hypre_ParCSRMatrixDestroy(Pattern);
ierr += hypre_ParCSRMatrixDestroy(RAP);
ierr += hypre_ParCSRMatrixDestroy(S);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, -1);
ierr += HYPRE_IJMatrixDestroy(ijmatrix);
/*end_time = hypre_MPI_Wtime();
if(my_id == 0)
{ fprintf(stdout, "NonGalerkin Time: %1.2e\n", end_time-start_time); } */
return ierr;
}
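/* Worked example of the lumping above (illustrative): suppose an entry
 * RAP_offd_data[j] = r is being eliminated and the diag/offd intersections
 * carry strength magnitudes {2, 1, 1}. Then sum_strong_neigh ends up as
 * r/4, the neighbour column with |s_k| = 2 receives lump_percent*2*(r/4),
 * the diagonal receives (1-lump_percent)*2*(r/4), and similarly for the
 * other entries; summed over k the additions total exactly r, so
 * collapsing an entry preserves the row sum of RAP. */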
|
ten_tusscher_2004_epi_S2_18.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_18.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5796889202933,0.00128692474306994,0.779986988202943,0.779770791697917,0.000174545903588985,0.485208984809148,0.00293791506502332,0.999998351833850,1.92983618570011e-08,1.88818815072613e-05,0.999771197687931,1.00750321613571,0.999998909683442,3.54972527811597e-05,0.944932711682890,9.89547163051393,139.466639319991};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
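/* Note: despite the rDY naming, RHS_cpu writes the *updated* state
 * values into rDY (the explicit Euler / Rush-Larsen step is applied
 * inside RHS_cpu using dt), not time derivatives; hence the results
 * are copied straight back into sv below. */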
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.1371645621504,0.000235463229709454,0.000162916042137935,0.000478823778054463,0.274861275791603,0.169442290631956,0.169502544648317,3.70994918819970,0.0182069316194050,1.80784826217638,1082.55200455351,0.000477041616507721,0.348836801394631,0.0189299653755662,0.00511025973273311,1.19845261627905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
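/* Exactly one of EPI, ENDO or MCELL must be defined (EPI for this file),
 * otherwise R_INF, S_INF, TAU_R and TAU_S below are left uninitialised. */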
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
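/* The gate updates above (rDY_[1]..rDY_[10], sfca, sg) use the Rush-Larsen
 * scheme, which integrates the linear gate ODE dx/dt = (x_inf - x)/tau_x
 * exactly over one step and remains stable for large dt. A minimal
 * illustrative helper (hypothetical, not used by this model code):
 */
static inline real rush_larsen_step(real x, real x_inf, real tau_x, real dt) {
// exact solution of dx/dt = (x_inf - x)/tau_x advanced by dt
return x_inf - (x_inf - x) * exp(-dt / tau_x);
}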
|
multithread_test.c | #include <stdio.h>
#include <omp.h>
int main(){
printf("I am master thread!\n");
#pragma omp parallel
{
printf("I am multithread!\n");
}
return 0;
} |
heat_3d-p.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Discretized 3D heat equation stencil with non periodic boundary conditions
* Adapted from Pochoir test bench
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
/*
* N is the number of points
* T is the number of timesteps
*/
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 800L
#define T 1L
#endif
#define NUM_FP_OPS 15
/* Define our arrays */
//double A[2][N][N][N];
double total=0; double sum_err_sqr=0;
int chtotal=0;
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
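/* For orientation: the CLooG-generated loops in main() below are a tiled,
 * OpenMP-parallelised form of a plain 7-point Jacobi-style heat update.
 * A reference sketch for one interior point (illustrative only; the
 * generated code handles boundary points via wrapped 2*N-1-i indexing):
 *
 *     A[1][i][j][k] = 0.125 * (A[0][i+1][j][k] - 2.0*A[0][i][j][k] + A[0][i-1][j][k])
 *                   + 0.125 * (A[0][i][j+1][k] - 2.0*A[0][i][j][k] + A[0][i][j-1][k])
 *                   + 0.125 * (A[0][i][j][k+1] - 2.0*A[0][i][j][k] + A[0][i][j][k-1])
 *                   + A[0][i][j][k];
 */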
int main(int argc, char * argv[]) {
long int t, i, j, k;
const int BASE = 1024;
long count=0;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0;
double ****A = (double ****)malloc(2 * sizeof (double ***));
int l;
for (l = 0; l < 2; l++){
A[l] = (double ***) malloc(N * sizeof(double **));
for (i = 0; i < N; i++){
A[l][i] = (double **) malloc(N * sizeof(double *));
for (j = 0; j < N; j++)
A[l][i][j] = (double *) malloc(N * sizeof (double));
}
}
printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T);
/* Initialization */
srand(42); // seed with a constant value to verify results
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0;
}
}
}
#ifdef TIME
gettimeofday(&start, 0);
#endif
#undef N
#define N 400L
#undef T
#define T 1L
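/* Note: N is halved to 400 here because the transformed loops below walk the
 * 800-point arrays as two mirrored halves, addressing the upper half with
 * indices of the form 2*N-1-i; the original N=800 is restored after the
 * generated code. */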
int t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((N >= 1) && (T >= 1)) {
for (t1=0;t1<=T-1;t1++) {
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[1][2*N-1-(t10)][2*N-1-(t14)][t15] = 0.125 * ( ((t10==0)? A[0][(0)][2*N-1-(t14)][t15]: A[0][(2*N-1-(t10)+1)][2*N-1-(t14)][t15]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][t15] + A[0][2*N-1-(t10)-1][2*N-1-(t14)][t15]) + 0.125 * ( ((t14==0)? A[0][2*N-1-(t10)][(0)][t15]: A[0][2*N-1-(t10)][(2*N-1-(t14)+1)][t15]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][t15] + A[0][2*N-1-(t10)][2*N-1-(t14)-1][t15]) + 0.125 * ( ((t15==0)? A[0][2*N-1-(t10)][2*N-1-(t14)][(2*N-1)]: A[0][2*N-1-(t10)][2*N-1-(t14)][(t15-1)]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][t15] + A[0][2*N-1-(t10)][2*N-1-(t14)][t15+1]) + A[0][2*N-1-(t10)][2*N-1-(t14)][t15];;
A[1][t10][t14][2*N-1-(t15)] = 0.125 * ( A[0][t10+1][t14][2*N-1-(t15)] - 2.0 * A[0][t10][t14][2*N-1-(t15)] + ((t10==0)? A[0][(2*N-1)][t14][2*N-1-(t15)]: A[0][(t10-1)][t14][2*N-1-(t15)])) + 0.125 * ( A[0][t10][t14+1][2*N-1-(t15)] - 2.0 * A[0][t10][t14][2*N-1-(t15)] + ((t14==0)? A[0][t10][(2*N-1)][2*N-1-(t15)]: A[0][t10][(t14-1)][2*N-1-(t15)])) + 0.125 * ( A[0][t10][t14][2*N-1-(t15)-1] - 2.0 * A[0][t10][t14][2*N-1-(t15)] + ((t15==0)? A[0][t10][t14][(0)]: A[0][t10][t14][(2*N-1-(t15)+1)])) + A[0][t10][t14][2*N-1-(t15)];;
A[1][2*N-1-(t10)][t14][2*N-1-(t15)] = 0.125 * ( ((t10==0)? A[0][(0)][t14][2*N-1-(t15)]: A[0][(2*N-1-(t10)+1)][t14][2*N-1-(t15)]) - 2.0 * A[0][2*N-1-(t10)][t14][2*N-1-(t15)] + A[0][2*N-1-(t10)-1][t14][2*N-1-(t15)]) + 0.125 * ( A[0][2*N-1-(t10)][t14+1][2*N-1-(t15)] - 2.0 * A[0][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t14==0)? A[0][2*N-1-(t10)][(2*N-1)][2*N-1-(t15)]: A[0][2*N-1-(t10)][(t14-1)][2*N-1-(t15)])) + 0.125 * ( A[0][2*N-1-(t10)][t14][2*N-1-(t15)-1] - 2.0 * A[0][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t15==0)? A[0][2*N-1-(t10)][t14][(0)]: A[0][2*N-1-(t10)][t14][(2*N-1-(t15)+1)])) + A[0][2*N-1-(t10)][t14][2*N-1-(t15)];;
A[1][t10][2*N-1-(t14)][2*N-1-(t15)] = 0.125 * ( A[0][t10+1][2*N-1-(t14)][2*N-1-(t15)] - 2.0 * A[0][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t10==0)? A[0][(2*N-1)][2*N-1-(t14)][2*N-1-(t15)]: A[0][(t10-1)][2*N-1-(t14)][2*N-1-(t15)])) + 0.125 * ( ((t14==0)? A[0][t10][(0)][2*N-1-(t15)]: A[0][t10][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[0][t10][2*N-1-(t14)][2*N-1-(t15)] + A[0][t10][2*N-1-(t14)-1][2*N-1-(t15)]) + 0.125 * ( A[0][t10][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[0][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[0][t10][2*N-1-(t14)][(0)]: A[0][t10][2*N-1-(t14)][(2*N-1-(t15)+1)])) + A[0][t10][2*N-1-(t14)][2*N-1-(t15)];;
A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] = 0.125 * ( ((t10==0)? A[0][(0)][2*N-1-(t14)][2*N-1-(t15)]: A[0][(2*N-1-(t10)+1)][2*N-1-(t14)][2*N-1-(t15)]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[0][2*N-1-(t10)-1][2*N-1-(t14)][2*N-1-(t15)]) + 0.125 * ( ((t14==0)? A[0][2*N-1-(t10)][(0)][2*N-1-(t15)]: A[0][2*N-1-(t10)][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[0][2*N-1-(t10)][2*N-1-(t14)-1][2*N-1-(t15)]) + 0.125 * ( A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[0][2*N-1-(t10)][2*N-1-(t14)][(0)]: A[0][2*N-1-(t10)][2*N-1-(t14)][(2*N-1-(t15)+1)])) + A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[1][t10][2*N-1-(t14)][t15] = 0.125 * ( A[0][t10+1][2*N-1-(t14)][t15] - 2.0 * A[0][t10][2*N-1-(t14)][t15] + ((t10==0)? A[0][(2*N-1)][2*N-1-(t14)][t15]: A[0][(t10-1)][2*N-1-(t14)][t15])) + 0.125 * ( ((t14==0)? A[0][t10][(0)][t15]: A[0][t10][(2*N-1-(t14)+1)][t15]) - 2.0 * A[0][t10][2*N-1-(t14)][t15] + A[0][t10][2*N-1-(t14)-1][t15]) + 0.125 * ( ((t15==0)? A[0][t10][2*N-1-(t14)][(2*N-1)]: A[0][t10][2*N-1-(t14)][(t15-1)]) - 2.0 * A[0][t10][2*N-1-(t14)][t15] + A[0][t10][2*N-1-(t14)][t15+1]) + A[0][t10][2*N-1-(t14)][t15];;
A[0][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] = 0.125 * ( ((t10==0)? A[1][(0)][2*N-1-(t14)][2*N-1-(t15)]: A[1][(2*N-1-(t10)+1)][2*N-1-(t14)][2*N-1-(t15)]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[1][2*N-1-(t10)-1][2*N-1-(t14)][2*N-1-(t15)]) + 0.125 * ( ((t14==0)? A[1][2*N-1-(t10)][(0)][2*N-1-(t15)]: A[1][2*N-1-(t10)][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + A[1][2*N-1-(t10)][2*N-1-(t14)-1][2*N-1-(t15)]) + 0.125 * ( A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[1][2*N-1-(t10)][2*N-1-(t14)][(0)]: A[1][2*N-1-(t10)][2*N-1-(t14)][(2*N-1-(t15)+1)])) + A[1][2*N-1-(t10)][2*N-1-(t14)][2*N-1-(t15)];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[1][2*N-1-(t10)][t14][t15] = 0.125 * ( ((t10==0)? A[0][(0)][t14][t15]: A[0][(2*N-1-(t10)+1)][t14][t15]) - 2.0 * A[0][2*N-1-(t10)][t14][t15] + A[0][2*N-1-(t10)-1][t14][t15]) + 0.125 * ( A[0][2*N-1-(t10)][t14+1][t15] - 2.0 * A[0][2*N-1-(t10)][t14][t15] + ((t14==0)? A[0][2*N-1-(t10)][(2*N-1)][t15]: A[0][2*N-1-(t10)][(t14-1)][t15])) + 0.125 * ( ((t15==0)? A[0][2*N-1-(t10)][t14][(2*N-1)]: A[0][2*N-1-(t10)][t14][(t15-1)]) - 2.0 * A[0][2*N-1-(t10)][t14][t15] + A[0][2*N-1-(t10)][t14][t15+1]) + A[0][2*N-1-(t10)][t14][t15];;
A[0][t10][2*N-1-(t14)][2*N-1-(t15)] = 0.125 * ( A[1][t10+1][2*N-1-(t14)][2*N-1-(t15)] - 2.0 * A[1][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t10==0)? A[1][(2*N-1)][2*N-1-(t14)][2*N-1-(t15)]: A[1][(t10-1)][2*N-1-(t14)][2*N-1-(t15)])) + 0.125 * ( ((t14==0)? A[1][t10][(0)][2*N-1-(t15)]: A[1][t10][(2*N-1-(t14)+1)][2*N-1-(t15)]) - 2.0 * A[1][t10][2*N-1-(t14)][2*N-1-(t15)] + A[1][t10][2*N-1-(t14)-1][2*N-1-(t15)]) + 0.125 * ( A[1][t10][2*N-1-(t14)][2*N-1-(t15)-1] - 2.0 * A[1][t10][2*N-1-(t14)][2*N-1-(t15)] + ((t15==0)? A[1][t10][2*N-1-(t14)][(0)]: A[1][t10][2*N-1-(t14)][(2*N-1-(t15)+1)])) + A[1][t10][2*N-1-(t14)][2*N-1-(t15)];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[0][2*N-1-(t10)][t14][2*N-1-(t15)] = 0.125 * ( ((t10==0)? A[1][(0)][t14][2*N-1-(t15)]: A[1][(2*N-1-(t10)+1)][t14][2*N-1-(t15)]) - 2.0 * A[1][2*N-1-(t10)][t14][2*N-1-(t15)] + A[1][2*N-1-(t10)-1][t14][2*N-1-(t15)]) + 0.125 * ( A[1][2*N-1-(t10)][t14+1][2*N-1-(t15)] - 2.0 * A[1][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t14==0)? A[1][2*N-1-(t10)][(2*N-1)][2*N-1-(t15)]: A[1][2*N-1-(t10)][(t14-1)][2*N-1-(t15)])) + 0.125 * ( A[1][2*N-1-(t10)][t14][2*N-1-(t15)-1] - 2.0 * A[1][2*N-1-(t10)][t14][2*N-1-(t15)] + ((t15==0)? A[1][2*N-1-(t10)][t14][(0)]: A[1][2*N-1-(t10)][t14][(2*N-1-(t15)+1)])) + A[1][2*N-1-(t10)][t14][2*N-1-(t15)];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[1][t10][t14][t15] = 0.125 * ( A[0][t10+1][t14][t15] - 2.0 * A[0][t10][t14][t15] + ((t10==0)? A[0][(2*N-1)][t14][t15]: A[0][(t10-1)][t14][t15])) + 0.125 * ( A[0][t10][t14+1][t15] - 2.0 * A[0][t10][t14][t15] + ((t14==0)? A[0][t10][(2*N-1)][t15]: A[0][t10][(t14-1)][t15])) + 0.125 * ( ((t15==0)? A[0][t10][t14][(2*N-1)]: A[0][t10][t14][(t15-1)]) - 2.0 * A[0][t10][t14][t15] + A[0][t10][t14][t15+1]) + A[0][t10][t14][t15];;
A[0][2*N-1-(t10)][2*N-1-(t14)][t15] = 0.125 * ( ((t10==0)? A[1][(0)][2*N-1-(t14)][t15]: A[1][(2*N-1-(t10)+1)][2*N-1-(t14)][t15]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][t15] + A[1][2*N-1-(t10)-1][2*N-1-(t14)][t15]) + 0.125 * ( ((t14==0)? A[1][2*N-1-(t10)][(0)][t15]: A[1][2*N-1-(t10)][(2*N-1-(t14)+1)][t15]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][t15] + A[1][2*N-1-(t10)][2*N-1-(t14)-1][t15]) + 0.125 * ( ((t15==0)? A[1][2*N-1-(t10)][2*N-1-(t14)][(2*N-1)]: A[1][2*N-1-(t10)][2*N-1-(t14)][(t15-1)]) - 2.0 * A[1][2*N-1-(t10)][2*N-1-(t14)][t15] + A[1][2*N-1-(t10)][2*N-1-(t14)][t15+1]) + A[1][2*N-1-(t10)][2*N-1-(t14)][t15];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[0][t10][t14][2*N-1-(t15)] = 0.125 * ( A[1][t10+1][t14][2*N-1-(t15)] - 2.0 * A[1][t10][t14][2*N-1-(t15)] + ((t10==0)? A[1][(2*N-1)][t14][2*N-1-(t15)]: A[1][(t10-1)][t14][2*N-1-(t15)])) + 0.125 * ( A[1][t10][t14+1][2*N-1-(t15)] - 2.0 * A[1][t10][t14][2*N-1-(t15)] + ((t14==0)? A[1][t10][(2*N-1)][2*N-1-(t15)]: A[1][t10][(t14-1)][2*N-1-(t15)])) + 0.125 * ( A[1][t10][t14][2*N-1-(t15)-1] - 2.0 * A[1][t10][t14][2*N-1-(t15)] + ((t15==0)? A[1][t10][t14][(0)]: A[1][t10][t14][(2*N-1-(t15)+1)])) + A[1][t10][t14][2*N-1-(t15)];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[0][t10][2*N-1-(t14)][t15] = 0.125 * ( A[1][t10+1][2*N-1-(t14)][t15] - 2.0 * A[1][t10][2*N-1-(t14)][t15] + ((t10==0)? A[1][(2*N-1)][2*N-1-(t14)][t15]: A[1][(t10-1)][2*N-1-(t14)][t15])) + 0.125 * ( ((t14==0)? A[1][t10][(0)][t15]: A[1][t10][(2*N-1-(t14)+1)][t15]) - 2.0 * A[1][t10][2*N-1-(t14)][t15] + A[1][t10][2*N-1-(t14)-1][t15]) + 0.125 * ( ((t15==0)? A[1][t10][2*N-1-(t14)][(2*N-1)]: A[1][t10][2*N-1-(t14)][(t15-1)]) - 2.0 * A[1][t10][2*N-1-(t14)][t15] + A[1][t10][2*N-1-(t14)][t15+1]) + A[1][t10][2*N-1-(t14)][t15];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[0][2*N-1-(t10)][t14][t15] = 0.125 * ( ((t10==0)? A[1][(0)][t14][t15]: A[1][(2*N-1-(t10)+1)][t14][t15]) - 2.0 * A[1][2*N-1-(t10)][t14][t15] + A[1][2*N-1-(t10)-1][t14][t15]) + 0.125 * ( A[1][2*N-1-(t10)][t14+1][t15] - 2.0 * A[1][2*N-1-(t10)][t14][t15] + ((t14==0)? A[1][2*N-1-(t10)][(2*N-1)][t15]: A[1][2*N-1-(t10)][(t14-1)][t15])) + 0.125 * ( ((t15==0)? A[1][2*N-1-(t10)][t14][(2*N-1)]: A[1][2*N-1-(t10)][t14][(t15-1)]) - 2.0 * A[1][2*N-1-(t10)][t14][t15] + A[1][2*N-1-(t10)][t14][t15+1]) + A[1][2*N-1-(t10)][t14][t15];;
}
}
}
}
}
lbp=0;
ubp=N-1;
#pragma omp parallel for private(lbv,ubv,t11,t12,t13,t14,t15)
for (t10=lbp;t10<=ubp;t10++) {
for (t11=0;t11<=floord(N-1,32);t11++) {
for (t12=0;t12<=floord(N-1,128);t12++) {
for (t14=32*t11;t14<=min(N-1,32*t11+31);t14++) {
lbv=128*t12;
ubv=min(N-1,128*t12+127);
#pragma ivdep
#pragma vector always
for (t15=lbv;t15<=ubv;t15++) {
A[0][t10][t14][t15] = 0.125 * ( A[1][t10+1][t14][t15] - 2.0 * A[1][t10][t14][t15] + ((t10==0)? A[1][(2*N-1)][t14][t15]: A[1][(t10-1)][t14][t15])) + 0.125 * ( A[1][t10][t14+1][t15] - 2.0 * A[1][t10][t14][t15] + ((t14==0)? A[1][t10][(2*N-1)][t15]: A[1][t10][(t14-1)][t15])) + 0.125 * ( ((t15==0)? A[1][t10][t14][(2*N-1)]: A[1][t10][t14][(t15-1)]) - 2.0 * A[1][t10][t14][t15] + A[1][t10][t14][t15+1]) + A[1][t10][t14][t15];;
}
}
}
}
}
}
}
/* End of CLooG code */
#undef N
#define N 800L
#undef T
#define T 1L
#ifdef TIME
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
printf("|Time taken: %7.5lfms\t", tdiff * 1.0e3);
printf("|MFLOPS: %f\n", ((((double)NUM_FP_OPS * N *N * N * (T-1)) / tdiff) / 1000000L));
#endif
#ifdef VERIFY
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
total+= A[T%2][i][j][k] ;
}
}
}
printf("|sum: %e\t", total);
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
sum_err_sqr += (A[T%2][i][j][k] - (total/N))*(A[T%2][i][j][k] - (total/N));
}
}
}
printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
for (k = 0; k < N; k++) {
chtotal += ((char *)A[T%2][i][j])[k];
}
}
}
printf("|sum(rep(A)) = %d\n", chtotal);
#endif
for (l = 0; l < 2; l++){
for (i = 0; i < N; i++){
for (j = 0; j < N; j++)
free(A[l][i][j]); // = (double *) malloc(N * sizeof (double));
free(A[l][i]); // = (double **) malloc(N * sizeof(double *));
}
free(A[l]); // = (double ***) malloc(N * sizeof(double **));
}
return 0;
}
// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t5=4; T1t6=4; T1t7=4; T1t8=4; ) @*/
// /* @ end @*/
|
fast_statistics.h | #pragma once
#ifndef FASTSTATISTIC_H
#define FASTSTATISTIC_H
#include <stdint.h>
#include <limits>
#include <random>
#include <chrono>
#define MERGE(x, y) (((x) & 0xFFFFFFF0) | (y))
/*
This code was adapted by Zur Shmaria to use a parameterized seed (needed for multithreading).
*/
namespace SplitMix64
{
/* Modified by D. Lemire, August 2017 */
/***
Fast Splittable Pseudorandom Number Generators
Steele Jr, Guy L., Doug Lea, and Christine H. Flood. "Fast splittable
pseudorandom number generators."
ACM SIGPLAN Notices 49.10 (2014): 453-472.
***/
/* Written in 2015 by Sebastiano Vigna (vigna@acm.org)
To the extent possible under law, the author has dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
See <http://creativecommons.org/publicdomain/zero/1.0/>. */
// original documentation by Vigna:
/* This is a fixed-increment version of Java 8's SplittableRandom generator
See http://dx.doi.org/10.1145/2714064.2660195 and
http://docs.oracle.com/javase/8/docs/api/java/util/SplittableRandom.html
It is a very fast generator passing BigCrush, and it can be useful if
for some reason you absolutely want 64 bits of state; otherwise, we
rather suggest to use a xoroshiro128+ (for moderately parallel
computations) or xorshift1024* (for massively parallel computations)
generator. */
// state for splitmix64
uint64_t state; /* The state can be seeded with any value. */
#pragma omp threadprivate(state)
// call this one before calling splitmix64
static inline void seed(uint64_t seed) { state = seed; }
static inline uint64_t genSeed(int threadID)
{
state = MERGE(std::chrono::high_resolution_clock::now().time_since_epoch().count(), threadID + 1);
return state;
}
// returns random number, modifies splitmix64_x
// compared with D. Lemire against
// http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8-b132/java/util/SplittableRandom.java#SplittableRandom.0gamma
static inline uint64_t next_r(uint64_t *seed)
{
uint64_t z = (*seed += UINT64_C(0x9E3779B97F4A7C15));
z = (z ^ (z >> 30)) * UINT64_C(0xBF58476D1CE4E5B9);
z = (z ^ (z >> 27)) * UINT64_C(0x94D049BB133111EB);
return z ^ (z >> 31);
}
// returns random number, advancing the global thread-private state
static inline uint64_t next()
{
return next_r(&state);
}
static inline uint32_t next32() { return (uint32_t)next_r(&state); }
} // namespace SplitMix64
namespace WyRand
{
// adapted to this project by D. Lemire, from https://github.com/wangyi-fudan/wyhash/blob/master/wyhash.h
// This uses mum hashing.
// state for wyrand
uint64_t state; /* The state can be seeded with any value. */
#pragma omp threadprivate(state)
// call wyrand_seed before calling wyrand
static inline void seed(uint64_t seed) { state = seed; }
static inline uint64_t genSeed(int threadID)
{
state = MERGE(std::chrono::high_resolution_clock::now().time_since_epoch().count(), threadID + 1);
return state;
}
static inline uint64_t next_r(uint64_t *s)
{
*s += UINT64_C(0xa0761d6478bd642f);
__uint128_t t = (__uint128_t)*s * (*s ^ UINT64_C(0xe7037ed1a0b428db));
return (t >> 64) ^ t;
}
// returns random number, modifies state
static inline uint64_t next() { return next_r(&state); }
static inline uint32_t next32() { return (uint32_t)next_r(&state); }
} // namespace WyRand
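// Note: next_r above relies on the non-standard __uint128_t type for the
// 64x64 -> 128-bit "mum" multiply, so this header requires a GCC/Clang-style
// compiler targeting a 64-bit platform.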
namespace WyHash
{
// adapted to this project by D. Lemire, from https://github.com/wangyi-fudan/wyhash/blob/master/wyhash.h
// This uses mum hashing.
// state for wyhash64
uint64_t state; /* The state can be seeded with any value. */
#pragma omp threadprivate(state)
// call wyhash64_seed before calling wyhash64
static inline void seed(uint64_t seed) { state = seed; }
static inline uint64_t genSeed(int threadID)
{
state = MERGE(std::chrono::high_resolution_clock::now().time_since_epoch().count(), threadID + 1);
return state;
}
static inline uint64_t next_r(uint64_t *seed)
{
*seed += UINT64_C(0x60bee2bee120fc15);
__uint128_t tmp;
tmp = (__uint128_t)*seed * UINT64_C(0xa3b195354a39b70d);
uint64_t m1 = (tmp >> 64) ^ tmp;
tmp = (__uint128_t)m1 * UINT64_C(0x1b03738712fad5c9);
uint64_t m2 = (tmp >> 64) ^ tmp;
return m2;
}
// returns random number, modifies state
static inline uint64_t next() { return next_r(&state); }
static inline uint32_t next32() { return (uint32_t)next_r(&state); }
} // namespace WyHash
namespace Xoroshiro128P
{
// original documentation by Vigna:
/* This is the successor to xorshift128+. It is the fastest full-period
generator passing BigCrush without systematic failures, but due to the
relatively short period it is acceptable only for applications with a
mild amount of parallelism; otherwise, use a xorshift1024* generator.
Beside passing BigCrush, this generator passes the PractRand test suite
up to (and included) 16TB, with the exception of binary rank tests,
which fail due to the lowest bit being an LFSR; all other bits pass all
tests. We suggest to use a sign test to extract a random Boolean value.
Note that the generator uses a simulated rotate operation, which most C
compilers will turn into a single instruction. In Java, you can use
Long.rotateLeft(). In languages that do not make low-level rotation
instructions accessible xorshift128+ could be faster.
The state must be seeded so that it is not everywhere zero. If you have
a 64-bit seed, we suggest to seed a splitmix64 generator and use its
output to fill s. */
// state for xoroshiro128plus
uint64_t state[2];
#pragma omp threadprivate(state)
static inline uint64_t rotl(const uint64_t x, int k)
{
return (x << k) | (x >> (64 - k));
}
// call this one before calling xoroshiro128plus
static inline void seed(uint64_t seed)
{
state[0] = SplitMix64::next_r(&seed);
state[1] = SplitMix64::next_r(&seed);
}
static inline uint64_t genSeed(int threadID)
{
seed(MERGE(std::chrono::high_resolution_clock::now().time_since_epoch().count(), threadID + 1));
return state[0];
}
// returns random number, modifies xoroshiro128plus_s
static inline uint64_t next_r(uint64_t seed[2])
{
const uint64_t s0 = seed[0];
uint64_t s1 = seed[1];
const uint64_t result = s0 + s1;
s1 ^= s0;
seed[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14); // a, b
seed[1] = rotl(s1, 36); // c
return result;
}
static inline uint64_t next()
{
return next_r(state);
}
static inline uint32_t next32() { return (uint32_t)next_r(state); }
} // namespace Xoroshiro128P
namespace Xoroshiro128PP
{
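// Note: despite the namespace name, the 4 x 32-bit state and the
// rotl(state[0] + state[3], 7) + state[0] output function below are
// those of xoshiro128++ (Blackman & Vigna), not xoroshiro128++.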
static inline uint32_t rotl(const uint32_t x, int k)
{
return (x << k) | (x >> (32 - k));
}
uint32_t state[4];
#pragma omp threadprivate(state)
static inline void seed(uint64_t seed)
{
state[0] = SplitMix64::next_r(&seed);
state[1] = SplitMix64::next_r(&seed);
state[2] = SplitMix64::next_r(&seed);
state[3] = SplitMix64::next_r(&seed);
}
static inline uint64_t genSeed(int threadID)
{
seed(MERGE(std::chrono::high_resolution_clock::now().time_since_epoch().count(), threadID + 1));
return state[0];
}
static inline uint32_t next_r(uint32_t seed[4])
{
const uint32_t result = rotl(seed[0] + seed[3], 7) + seed[0];
const uint32_t t = seed[1] << 9;
seed[2] ^= seed[0];
seed[3] ^= seed[1];
seed[1] ^= seed[2];
seed[0] ^= seed[3];
seed[2] ^= t;
seed[3] = rotl(seed[3], 11);
return result;
}
static inline uint32_t next()
{
return next_r(state);
}
} // namespace Xoroshiro128PP
namespace GMS::FastStatistics {
/**
* Provides STL compatible RNG over WyRand.
*/
class WyRandRng {
private:
uint64_t state;
public:
using result_type = uint64_t;
static constexpr result_type min() { return 0; }
static constexpr result_type max() { return std::numeric_limits<result_type>::max(); }
WyRandRng(int threadId) {
state = WyRand::genSeed(threadId);
}
result_type operator()() {
return WyRand::next_r(&state);
}
};
/**
* Provides STL compatible RNG over Xoroshiro128PP.
*/
class Xoroshiro128PPRng {
private:
uint32_t state[4];
public:
using result_type = uint32_t;
static constexpr result_type min() { return 0; }
static constexpr result_type max() { return std::numeric_limits<result_type>::max(); }
Xoroshiro128PPRng(int threadId) {
Xoroshiro128PP::genSeed(threadId);
for (int i = 0; i < 4; ++i) {
state[i] = Xoroshiro128PP::state[i];
}
}
result_type operator()() {
return Xoroshiro128PP::next_r(state);
}
};
}
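/* Minimal usage sketch (illustrative only, not part of this header):
 * with static min()/max(), the wrappers above satisfy the C++
 * UniformRandomBitGenerator requirements and can drive the standard
 * <random> distributions, e.g. one generator per OpenMP thread:
 *
 *     GMS::FastStatistics::WyRandRng rng(omp_get_thread_num());
 *     std::uniform_int_distribution<int> dist(0, 99);
 *     int sample = dist(rng);   // uniform integer in [0, 99]
 */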
#endif |
pt_to_pt_haloexchange.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Contains the point-to-point halo exchange mixed mode */
/* OpenMP/MPI benchmarks. */
/* This includes: -masteronly haloexchange */
/* -funnelled haloexchange */
/* -multiple haloexchange */
/*-----------------------------------------------------------*/
#include "pt_to_pt_haloexchange.h"
/*-----------------------------------------------------------*/
/* haloExchange */
/* */
/* Driver subroutine for the haloExchange benchmark. */
/*-----------------------------------------------------------*/
int haloExchange(int benchmarkType) {
int dataSizeIter;
/* find the ranks of the left and right neighbour */
findNeighbours();
/* initialise repsToDo to defaultReps */
repsToDo = defaultReps;
/* Start loop over data sizes */
dataSizeIter = minDataSize; /* Initialise dataSizeIter */
while (dataSizeIter <= maxDataSize) {
/* set sizeofBuffer */
sizeofBuffer = dataSizeIter * numThreads;
/*Allocate space for the main data arrays */
allocateHaloexchangeData(sizeofBuffer);
/* perform benchmark warm-up */
if (benchmarkType == MASTERONLY) {
masteronlyHaloexchange(warmUpIters, dataSizeIter);
} else if (benchmarkType == FUNNELLED) {
funnelledHaloexchange(warmUpIters, dataSizeIter);
} else if (benchmarkType == MULTIPLE) {
multipleHaloexchange(warmUpIters, dataSizeIter);
}
/* Each process performs a verification test */
testHaloexchange(sizeofBuffer, dataSizeIter);
/*Initialise the benchmark */
benchComplete = FALSE;
/*Execute benchmark until target time is reached */
while (benchComplete != TRUE) {
/*Start timer */
MPI_Barrier(comm);
startTime = MPI_Wtime();
/*Execute benchmarkType for repsToDo repetitions*/
if (benchmarkType == MASTERONLY) {
masteronlyHaloexchange(repsToDo, dataSizeIter);
} else if (benchmarkType == FUNNELLED) {
funnelledHaloexchange(repsToDo, dataSizeIter);
} else if (benchmarkType == MULTIPLE) {
multipleHaloexchange(repsToDo, dataSizeIter);
}
/*Stop timer */
MPI_Barrier(comm);
finishTime = MPI_Wtime();
totalTime = finishTime - startTime;
/* Test if target time is reached with the number of reps */
if (myMPIRank == 0) { benchComplete = repTimeCheck(totalTime, repsToDo); }
/* Ensure all procs have the same value of benchComplete */
/* and repsToDo */
MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
}
/* Master process sets benchmark results */
if (myMPIRank == 0) {
setReportParams(dataSizeIter, repsToDo, totalTime);
printReport();
}
/* Free allocated data */
freeHaloexchangeData();
/* Double dataSize and loop again */
dataSizeIter = dataSizeIter * 2;
}
return 0;
}
/*-----------------------------------------------------------*/
/* masteronlyHaloexchange */
/* */
/* Each process exchanges a message with its left and */
/* right neighbour. */
/* Communication takes place outside of the parallel */
/* region. */
/*-----------------------------------------------------------*/
int masteronlyHaloexchange(int totalReps, int dataSize) {
int repIter, i;
for (repIter = 0; repIter < totalReps; repIter++) {
/* Each thread writes its globalID to rightSendBuf
* and leftSendBuf using a parallel for directive.
*/
#pragma omp parallel for default(none) private(i) \
shared(leftSendBuf, rightSendBuf, dataSize) \
shared(sizeofBuffer, globalIDarray) schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
leftSendBuf[i] = globalIDarray[myThreadID];
rightSendBuf[i] = globalIDarray[myThreadID];
}
/* Process starts send of data to leftNeighbour and
* rightNeighbour using non-blocking send...
*/
MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour, TAG, commCart,
&requestArray[0]);
MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour, TAG,
commCart, &requestArray[1]);
/* Process then waits for messages from leftNeighbour and rightNeighbour */
MPI_Irecv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour, TAG, commCart,
&requestArray[2]);
MPI_Irecv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour, TAG,
commCart, &requestArray[3]);
/* Finish the sends and receives with an MPI_Waitall on the requests */
MPI_Waitall(4, requestArray, statusArray);
/* Each thread now reads its part of the left and right
* received buffers.
*/
#pragma omp parallel for default(none) private(i) \
shared(leftRecvBuf, rightRecvBuf, dataSize, sizeofBuffer) \
shared(finalLeftBuf, finalRightBuf) schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
finalLeftBuf[i] = leftRecvBuf[i];
finalRightBuf[i] = rightRecvBuf[i];
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* funnelledHaloexchange */
/* */
/* Each process exchanges a message with its left and */
/* right neighbour. */
/* Communication takes place by one thread inside of the */
/* parallel region. */
/*-----------------------------------------------------------*/
int funnelledHaloexchange(int totalReps, int dataSize) {
int repIter, i;
/* Open the parallel region */
#pragma omp parallel default(none) private(i, repIter) \
shared(dataSize, sizeofBuffer, leftSendBuf, rightSendBuf) \
shared(rightRecvBuf, leftRecvBuf, finalLeftBuf, finalRightBuf) \
shared(globalIDarray, commCart, totalReps, requestArray, statusArray) \
shared(leftNeighbour, rightNeighbour)
{
for (repIter = 0; repIter < totalReps; repIter++) {
/* Each thread writes its globalID to rightSendBuf
* and leftSendBuf.
*/
#pragma omp for schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
leftSendBuf[i] = globalIDarray[myThreadID];
rightSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier here takes care of necessary synchronisation */
#pragma omp master
{
/* Master thread starts send of data to left and right neighbours
* with a non-blocking send.
*/
MPI_Isend(leftSendBuf, sizeofBuffer, MPI_INT, leftNeighbour, TAG,
commCart, &requestArray[0]);
MPI_Isend(rightSendBuf, sizeofBuffer, MPI_INT, rightNeighbour, TAG,
commCart, &requestArray[1]);
/* Thread then starts receive of messages from leftNeighbour
* and rightNeighbour.
*/
MPI_Irecv(leftRecvBuf, sizeofBuffer, MPI_INT, leftNeighbour, TAG,
commCart, &requestArray[2]);
MPI_Irecv(rightRecvBuf, sizeofBuffer, MPI_INT, rightNeighbour, TAG,
commCart, &requestArray[3]);
/* Finish the sends and receives with an MPI_Waitall on the requests */
MPI_Waitall(4, requestArray, statusArray);
}
/*Barrier to ensure master thread has completed transfer. */
#pragma omp barrier
/* Each thread now reads its part of the left and right received buffers.
*/
#pragma omp for schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
finalLeftBuf[i] = leftRecvBuf[i];
finalRightBuf[i] = rightRecvBuf[i];
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* multipleHaloexchange */
/* */
/* Each process exchanges a message with its left and */
/* right neighbour. */
/* All threads take part in the inter-process               */
/* communication. */
/*-----------------------------------------------------------*/
int multipleHaloexchange(int totalReps, int dataSize) {
int repIter, i;
int lBound;
/* Open the parallel region */
#pragma omp parallel default(none) private(i, requestArray, statusArray, \
lBound, repIter) \
shared(dataSize, sizeofBuffer, leftSendBuf, rightSendBuf) \
shared(rightRecvBuf, leftRecvBuf, finalLeftBuf, finalRightBuf) shared( \
leftNeighbour, rightNeighbour, globalIDarray, commCart, totalReps)
{
for (repIter = 0; repIter < totalReps; repIter++) {
/* Calculate lower bound for each thread */
lBound = (myThreadID * dataSize);
/* Each thread writes its globalID to rightSendBuf
* and leftSendBuf.
*/
#pragma omp for nowait schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
leftSendBuf[i] = globalIDarray[myThreadID];
rightSendBuf[i] = globalIDarray[myThreadID];
}
/* Each thread starts send of dataSize items to leftNeighbour
* and to rightNeighbour.
*/
MPI_Isend(&leftSendBuf[lBound], dataSize, MPI_INT, leftNeighbour,
myThreadID, commCart, &requestArray[0]);
MPI_Isend(&rightSendBuf[lBound], dataSize, MPI_INT, rightNeighbour,
myThreadID, commCart, &requestArray[1]);
/* Each Thread then starts receive of messages from leftNeighbour
* and rightNeighbour.
*/
MPI_Irecv(&leftRecvBuf[lBound], dataSize, MPI_INT, leftNeighbour,
myThreadID, commCart, &requestArray[2]);
MPI_Irecv(&rightRecvBuf[lBound], dataSize, MPI_INT, rightNeighbour,
myThreadID, commCart, &requestArray[3]);
/* Finish the sends and receives with an MPI_Waitall on the requests */
MPI_Waitall(4, requestArray, statusArray);
/* Each thread now reads its part of the left and
* right received buffers.
*/
#pragma omp for nowait schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
finalLeftBuf[i] = leftRecvBuf[i];
finalRightBuf[i] = rightRecvBuf[i];
}
}
}
return 0;
}
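/* Note: the multiple variant above has all OpenMP threads make MPI calls
 * concurrently, which is only legal when MPI is initialised with full
 * thread support. A minimal sketch of the required initialisation
 * (assumed to live in the suite's driver, not in this file):
 *
 *     int provided;
 *     MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
 *     if (provided < MPI_THREAD_MULTIPLE) {
 *         // only the masteronly/funnelled benchmarks are safe to run
 *     }
 */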
/*-----------------------------------------------------------*/
/* allocateHaloexchangeData */
/* */
/* Allocate memory for the main data arrays in the */
/* haloexchange. */
/*-----------------------------------------------------------*/
int allocateHaloexchangeData(int sizeofBuffer) {
leftSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));
leftRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
rightSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));
rightRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
finalLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));
finalRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));
return 0;
}
/*-----------------------------------------------------------*/
/* freeHaloexchangeData */
/* */
/* Deallocates the storage space for the main data arrays. */
/*-----------------------------------------------------------*/
int freeHaloexchangeData() {
free(leftSendBuf);
free(leftRecvBuf);
free(rightSendBuf);
free(rightRecvBuf);
free(finalLeftBuf);
free(finalRightBuf);
return 0;
}
/*-----------------------------------------------------------*/
/* testHaloexchange */
/* */
/* Verifies that the halo exchange benchmark worked */
/* correctly. */
/*-----------------------------------------------------------*/
int testHaloexchange(int sizeofBuffer, int dataSize) {
int i;
int testFlag, reduceFlag;
int *testLeftBuf, *testRightBuf;
/* set testFlag to true */
testFlag = TRUE;
/*allocate space for testLeftBuf and testRightBuf */
testLeftBuf = (int *)malloc(sizeofBuffer * sizeof(int));
testRightBuf = (int *)malloc(sizeofBuffer * sizeof(int));
/*construct testLeftBuf and testRightBuf with correct values */
#pragma omp parallel for default(none) private(i) \
shared(leftNeighbour, rightNeighbour, numThreads) \
shared(dataSize, sizeofBuffer, testLeftBuf, testRightBuf) \
schedule(static, dataSize)
for (i = 0; i < sizeofBuffer; i++) {
/* Calculate globalID of thread expected in finalLeftBuf.. */
testLeftBuf[i] = (leftNeighbour * numThreads) + myThreadID;
/* ..and in finalRightBuf. */
testRightBuf[i] = (rightNeighbour * numThreads) + myThreadID;
}
/* Compare.. */
for (i = 0; i < sizeofBuffer; i++) {
/* 1) values from left neighbour */
if (testLeftBuf[i] != finalLeftBuf[i]) { testFlag = FALSE; }
/* 2) values from right neighbour */
if (testRightBuf[i] != finalRightBuf[i]) { testFlag = FALSE; }
}
MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
/* Master then sets testOutcome flag */
if (myMPIRank == 0) { setTestOutcome(reduceFlag); }
/* free space for testLeftBuf and testRightBuf */
free(testLeftBuf);
free(testRightBuf);
return 0;
}
|
GB_binop__pair_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_fp64)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: double
// A type: double
// A pattern? 1
// B type: double
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
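// Note (editorial): the PAIR operator ignores both inputs and always returns 1,
// which is why GB_GETA/GB_GETB above expand to empty statements and
// GB_A_IS_PATTERN/GB_B_IS_PATTERN are both 1: only the sparsity patterns of A
// and B matter, e.g. eWiseAdd with PAIR yields C(i,j) = 1 wherever A or B has
// an entry at (i,j).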
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64)
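// Note (editorial): when any of these GxB_NO_* compile-time flags is set,
// every worker below compiles to a stub returning GrB_NO_VALUE, and GraphBLAS
// is expected to fall back to its generic, non-specialized code path instead.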
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pair_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pair_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pair_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
private.c | #include <stdio.h>
#include <omp.h>
void func(){
int tmp = 0;
#pragma omp parallel for firstprivate(tmp)
for (int j = 0; j<10; ++j){
printf("%d\t", tmp);
tmp += j;
}
printf("\n%d\n", tmp);
}
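/* Note (illustrative): firstprivate(tmp) gives each thread its own copy of tmp,
initialised to the master's value 0, so the loop prints small per-thread partial
sums. The private copies are not merged back into the original tmp, so the final
printf in func() prints 0. Using lastprivate(tmp) instead would copy back the
value of tmp after the last iteration (j == 9). */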
int main(){
func();
return 0;
}
|
CMS_simint.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <sys/time.h>
#include "simint/simint.h"
#include "CMS_config.h"
#include "CMS_basis.h"
#include "CMS_simint.h"
#define NCART(am) (((am)+1)*((am)+2)/2)
typedef struct simint_shell shell_s;
typedef struct simint_shell* shell_t;
typedef struct simint_multi_shellpair multi_sp_s;
typedef struct simint_multi_shellpair* multi_sp_t;
static double CMS_get_walltime_sec()
{
double sec;
struct timeval tv;
gettimeofday(&tv, NULL);
sec = tv.tv_sec + (double) tv.tv_usec / 1000000.0;
return sec;
}
void CMS_Simint_init(BasisSet_p basis, Simint_p *simint, int nthread, double prim_scrval)
{
CMS_ASSERT(nthread > 0);
Simint_p s = (Simint_p) calloc(1, sizeof(struct Simint));
CMS_ASSERT(s != NULL);
simint_init();
s->nthread = nthread;
s->max_am = basis->max_momentum;
int max_ncart = NCART(s->max_am), buff_size;
// Allocate workbuf for all threads on this node
buff_size = simint_ostei_workmem(0, s->max_am);
if (buff_size < max_ncart * max_ncart) buff_size = max_ncart * max_ncart;
buff_size = (buff_size + 7) / 8 * 8; // Align to 8 double (64 bytes)
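// Illustrative example of the round-up idiom above: buff_size = 13 gives
// (13 + 7) / 8 * 8 = 16, the next multiple of 8 doubles (= 64 bytes), so each
// thread's slice of workbuf starts on a 64-byte boundary.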
s->workmem_per_thread = buff_size;
s->workbuf = (double *) CMS_malloc_aligned(s->workmem_per_thread * nthread * sizeof(double), 64);
CMS_ASSERT(s->workbuf != NULL);
// Allocate outbuf for all threads on this node
// Output buffer should hold _SIMINT_NSHELL_SIMD ERI results
// +8 for Simint primitive screening statistic info
buff_size = max_ncart * max_ncart * max_ncart * max_ncart;
buff_size = (buff_size + 7) / 8 * 8; // Align to 8 double (64 bytes)
s->outmem_per_thread = buff_size * _SIMINT_NSHELL_SIMD + 8;
s->outbuf = (double *) CMS_malloc_aligned(s->outmem_per_thread * nthread * sizeof(double), 64);
CMS_ASSERT(s->outbuf != NULL);
// Form and store Simint shells for all shells of this molecule
int nshell = basis->nshells;
size_t shells_msize = sizeof(shell_s) * nshell;
s->nshell = nshell;
s->shells = (shell_t) malloc(shells_msize);
CMS_ASSERT(s->shells != NULL);
s->shell_memsize = (double) shells_msize;
shell_t shell_ptr = s->shells;
for (int i=0; i < nshell; i++)
{
// Initialize variables in structure
simint_initialize_shell(shell_ptr);
// Allocate space for alpha and coef for the shell
simint_allocate_shell(basis->nexp[i], shell_ptr);
s->shell_memsize += (double) shell_ptr->memsize;
shell_ptr->am = basis->momentum[i];
shell_ptr->nprim = basis->nexp[i];
shell_ptr->x = basis->xyz0[i*4+0];
shell_ptr->y = basis->xyz0[i*4+1];
shell_ptr->z = basis->xyz0[i*4+2];
for (int j=0; j<basis->nexp[i]; j++)
{
shell_ptr->alpha[j] = basis->exp[i][j];
shell_ptr->coef[j] = basis->cc[i][j];
}
shell_ptr++;
}
// Here we assume there are no unit shells (shells with zero orbital exponent)
simint_normalize_shells(nshell, s->shells);
// For primitive screening, fast Schwarz might have issue with aug-cc-pVDZ,
// try to use SIMINT_SCREEN_SCHWARZ if necessary
if (prim_scrval < 0.0 || prim_scrval > 1) prim_scrval = 1e-14;
s->screen_method = SIMINT_SCREEN_SCHWARZ;
s->screen_tol = prim_scrval;
printf("Simint screen method = SIMINT_SCREEN_SCHWARZ \n");
printf("Simint prim screen tol = %.2e\n", s->screen_tol);
// Precompute all shell pairs
// Will be used by CMS_Simint_fill_multi_sp_list(), DO NOT SKIP it!!!
size_t sp_msize = sizeof(multi_sp_s) * (size_t) nshell * (size_t) nshell;
s->shellpairs = (multi_sp_t) malloc(sp_msize);
CMS_ASSERT(s->shellpairs != NULL);
s->shellpair_memsize = (double) sp_msize;
// Do not initialize all shell pairs now to reduce the memory usage,
// unique screened shell pairs will be created after screening.
// Reset timer
s->ostei_setup = 0.0;
s->ostei_actual = 0.0;
s->fock_update_F = 0.0;
// Allocate space for statistic info
int stat_info_size = sizeof(double) * nthread;
s->num_multi_shellpairs = (double*) malloc(stat_info_size);
s->sum_nprim = (double*) malloc(stat_info_size);
s->num_screened_prim = (double*) malloc(stat_info_size);
s->num_unscreened_prim = (double*) malloc(stat_info_size);
s->num_screened_vec = (double*) malloc(stat_info_size);
s->num_unscreened_vec = (double*) malloc(stat_info_size);
CMS_ASSERT(s->num_multi_shellpairs != NULL && s->sum_nprim != NULL);
CMS_ASSERT(s->num_screened_prim != NULL && s->num_unscreened_prim != NULL);
CMS_ASSERT(s->num_screened_vec != NULL && s->num_unscreened_vec != NULL);
memset(s->num_multi_shellpairs, 0, stat_info_size);
memset(s->sum_nprim, 0, stat_info_size);
memset(s->num_screened_prim, 0, stat_info_size);
memset(s->num_unscreened_prim, 0, stat_info_size);
memset(s->num_screened_vec, 0, stat_info_size);
memset(s->num_unscreened_vec, 0, stat_info_size);
s->df_am_shell_id = NULL;
s->df_am_shell_spos = NULL;
s->df_am_shell_num = NULL;
s->df_shells = NULL;
s->df_shellpairs = NULL;
*simint = s;
}
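/* Minimal lifecycle sketch (illustrative only; constructing the BasisSet_p via
CMS_basis.h is assumed to happen elsewhere):
Simint_p simint;
CMS_Simint_init(basis, &simint, nthread, 1e-14);
// ... per-thread integral calls, e.g. CMS_Simint_calc_shellquartet() ...
CMS_Simint_destroy(simint, 1); // 1 = print timing & screening statistics
*/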
void CMS_Simint_create_uniq_scr_sp(Simint_p simint, const int nsp, const int *M_list, const int *N_list)
{
int nshell = simint->nshell;
#pragma omp parallel for schedule(dynamic, 16)
for (int i = 0; i < nsp; i++)
{
int M = M_list[i];
int N = N_list[i];
multi_sp_t MN_pair = &simint->shellpairs[M * nshell + N];
simint_initialize_multi_shellpair(MN_pair);
simint_create_multi_shellpair(1, simint->shells + M, 1, simint->shells + N, MN_pair, simint->screen_method);
// This loop runs in parallel, so accumulate the shared counter atomically
#pragma omp atomic
simint->shellpair_memsize += (double) MN_pair->memsize;
if (M != N)
{
multi_sp_t NM_pair = &simint->shellpairs[N * nshell + M];
simint_initialize_multi_shellpair(NM_pair);
simint_create_multi_shellpair(1, simint->shells + N, 1, simint->shells + M, NM_pair, simint->screen_method);
#pragma omp atomic
simint->shellpair_memsize += (double) NM_pair->memsize;
}
}
double workmem_MB = simint->workmem_per_thread * simint->nthread * sizeof(double) / 1048576.0;
double outmem_MB = simint->outmem_per_thread * simint->nthread * sizeof(double) / 1048576.0;
double shellpair_mem_MB = simint->shellpair_memsize / 1048576.0;
double stat_info_mem_MB = sizeof(double) * simint->nthread * 6 / 1048576.0;
double Simint_mem_MB = workmem_MB + outmem_MB + shellpair_mem_MB + stat_info_mem_MB;
printf("CMS Simint memory usage = %.2lf MB \n", Simint_mem_MB);
}
void CMS_Simint_setup_DF(Simint_p simint, BasisSet_p df_basis)
{
Simint_p s = simint;
// Reallocate workbuf for density fitting
s->df_max_am = df_basis->max_momentum;
if (s->df_max_am > s->max_am) s->max_am = s->df_max_am;
int max_ncart = NCART(s->max_am);
int buff_size = simint_ostei_workmem(0, s->max_am);
if (buff_size < max_ncart * max_ncart) buff_size = max_ncart * max_ncart;
buff_size = (buff_size + 7) / 8 * 8; // Align to 8 double (64 bytes)
s->workmem_per_thread = buff_size;
CMS_free_aligned(s->workbuf);
s->workbuf = (double *) CMS_malloc_aligned(s->workmem_per_thread * s->nthread * sizeof(double), 64);
CMS_ASSERT(s->workbuf != NULL);
// Form and store Simint shells for all density fitting shells
// The last shell is the unit shell
int df_nshell = df_basis->nshells;
size_t df_shells_msize = sizeof(shell_s) * (df_nshell + 1);
s->df_nshell = df_nshell;
s->df_shells = (shell_t) malloc(df_shells_msize);
CMS_ASSERT(s->df_shells != NULL);
s->shell_memsize = (double) df_shells_msize;
// Copy all density fitting shells
shell_t df_shell_ptr = s->df_shells;
for (int i = 0; i < df_nshell; i++)
{
// Initialize variables in structure
simint_initialize_shell(df_shell_ptr);
// Allocate space for alpha and coef for the shell
simint_allocate_shell(df_basis->nexp[i], df_shell_ptr);
s->shell_memsize += (double) df_shell_ptr->memsize;
df_shell_ptr->am = df_basis->momentum[i];
df_shell_ptr->nprim = df_basis->nexp[i];
df_shell_ptr->x = df_basis->xyz0[i*4+0];
df_shell_ptr->y = df_basis->xyz0[i*4+1];
df_shell_ptr->z = df_basis->xyz0[i*4+2];
for (int j = 0; j < df_basis->nexp[i]; j++)
{
df_shell_ptr->alpha[j] = df_basis->exp[i][j];
df_shell_ptr->coef[j] = df_basis->cc[i][j];
}
df_shell_ptr++;
}
// The unit shell
simint_initialize_shell(df_shell_ptr);
simint_allocate_shell(1, df_shell_ptr);
s->shell_memsize += (double) df_shell_ptr->memsize;
df_shell_ptr->am = 0;
df_shell_ptr->nprim = 1;
df_shell_ptr->x = 0;
df_shell_ptr->y = 0;
df_shell_ptr->z = 0;
df_shell_ptr->alpha[0] = 0;
df_shell_ptr->coef[0] = 1;
// Normalize all shells except the unit shell
simint_normalize_shells(df_nshell, s->df_shells);
// Precompute all shell pairs for density fitting, DO NOT SKIP IT
size_t df_sp_msize = sizeof(multi_sp_s) * df_nshell;
s->df_shellpairs = (multi_sp_t) malloc(df_sp_msize);
CMS_ASSERT(s->df_shellpairs != NULL);
s->shellpair_memsize += (double) df_sp_msize;
int unit_shell_id = df_nshell;
for (int i = 0; i < df_nshell; i++)
{
multi_sp_t pair;
pair = &s->df_shellpairs[i];
simint_initialize_multi_shellpair(pair);
simint_create_multi_shellpair(1, s->df_shells+i, 1, s->df_shells+unit_shell_id, pair, s->screen_method);
s->shellpair_memsize += (double) pair->memsize;
}
// Group density fitting shells by AM
s->df_am_shell_id = (int*) malloc(sizeof(int) * df_nshell);
s->df_am_shell_spos = (int*) malloc(sizeof(int) * (s->df_max_am + 2));
s->df_am_shell_num = (int*) malloc(sizeof(int) * (s->df_max_am + 1));
memset(s->df_am_shell_num, 0, sizeof(int) * (s->df_max_am + 1));
for (int i = 0; i < df_nshell; i++)
{
int am = s->df_shells[i].am;
s->df_am_shell_num[am]++;
}
memset(s->df_am_shell_spos, 0, sizeof(int) * (s->df_max_am + 2));
for (int i = 1; i <= s->df_max_am + 1; i++)
s->df_am_shell_spos[i] = s->df_am_shell_spos[i - 1] + s->df_am_shell_num[i - 1];
memset(s->df_am_shell_num, 0, sizeof(int) * (s->df_max_am + 1));
for (int i = 0; i < df_nshell; i++)
{
int am = s->df_shells[i].am;
int group_pos = s->df_am_shell_spos[am] + s->df_am_shell_num[am];
s->df_am_shell_id[group_pos] = i;
s->df_am_shell_num[am]++;
}
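/* Illustrative example of the two-pass counting sort above: for df shells with
AM = {1, 0, 2, 0}, df_am_shell_num ends as {2, 1, 1}, df_am_shell_spos as
{0, 2, 3, 4}, and df_am_shell_id as {1, 3, 0, 2}; shells of AM a then occupy
df_am_shell_id[ spos[a] .. spos[a+1]-1 ]. */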
}
void CMS_Simint_free_DF_shellpairs(Simint_p simint)
{
int df_nshell = simint->df_nshell;
simint_free_shells(df_nshell + 1, simint->df_shells);
simint_free_multi_shellpairs(df_nshell, simint->df_shellpairs);
free(simint->df_shells);
free(simint->df_shellpairs);
free(simint->df_am_shell_id);
free(simint->df_am_shell_spos);
free(simint->df_am_shell_num);
simint->df_shells = NULL;
simint->df_shellpairs = NULL;
simint->df_am_shell_id = NULL;
simint->df_am_shell_spos = NULL;
simint->df_am_shell_num = NULL;
}
void CMS_Simint_destroy(Simint_p simint, int show_stat)
{
// Generate final statistic info
double sum_msp = 0, sum_nprim = 0;
double total_prim = 0, unscreened_prim = 0;
double total_vec = 0, unscreened_vec = 0;
for (int i = 0; i < simint->nthread; i++)
{
sum_msp += (double) simint->num_multi_shellpairs[i];
sum_nprim += (double) simint->sum_nprim[i];
total_prim += simint->num_screened_prim[i] + simint->num_unscreened_prim[i];
unscreened_prim += simint->num_unscreened_prim[i];
total_vec += simint->num_screened_vec[i] + simint->num_unscreened_vec[i];
unscreened_vec += simint->num_unscreened_vec[i];
}
double avg_nprim = sum_nprim / sum_msp;
double prim_unscreen_ratio = unscreened_prim / total_prim;
double vec_unscreen_ratio = unscreened_vec / total_vec;
// Print timer and statistic info
if (show_stat)
{
printf(
"Timer: Simint setup, Simint ERI actual, Fock mat accum. = %lf, %lf, %lf sec\n",
simint->ostei_setup, simint->ostei_actual, simint->fock_update_F
);
printf(
"Simint statistic: avg. ket-side nprim, prim unscreened ratio, SIMD unscreened ratio = %.1lf, %.1lf %%, %.1lf %%\n",
avg_nprim, prim_unscreen_ratio * 100.0, vec_unscreen_ratio * 100.0
);
}
// Free shell pair info
int nshell = simint->nshell;
int df_nshell = simint->df_nshell;
if (simint->df_shells != NULL)
{
simint_free_shells(df_nshell + 1, simint->df_shells);
simint_free_multi_shellpairs(df_nshell, simint->df_shellpairs);
}
simint_free_shells(nshell, simint->shells);
simint_free_multi_shellpairs(nshell * nshell, simint->shellpairs);
// Free memory
free(simint->shellpairs);
free(simint->shells);
free(simint->df_shellpairs);
free(simint->df_shells);
free(simint->df_am_shell_id);
free(simint->df_am_shell_spos);
free(simint->df_am_shell_num);
CMS_free_aligned(simint->workbuf);
CMS_free_aligned(simint->outbuf);
free(simint->num_multi_shellpairs);
free(simint->sum_nprim);
free(simint->num_screened_prim);
free(simint->num_unscreened_prim);
free(simint->num_screened_vec);
free(simint->num_unscreened_vec);
free(simint);
simint_finalize();
}
int CMS_Simint_get_sp_AM_idx(Simint_p simint, int P, int Q)
{
shell_t shells = simint->shells;
return shells[P].am * ((_SIMINT_OSTEI_MAXAM) + 1) + shells[Q].am;
}
double CMS_Simint_get_DF_sp_scrval(Simint_p simint, int i)
{
multi_sp_t pair;
pair = &simint->df_shellpairs[i];
return pair->screen_max;
}
void CMS_Simint_create_multi_sp(void **multi_sp_)
{
multi_sp_t multi_sp;
multi_sp = (multi_sp_t) malloc(sizeof(multi_sp_s));
CMS_ASSERT(multi_sp != NULL);
// No need to worry about memory allocation; it will be handled later
simint_initialize_multi_shellpair(multi_sp);
*multi_sp_ = multi_sp;
}
void CMS_Simint_free_multi_sp(void *multi_sp)
{
CMS_ASSERT(multi_sp != NULL);
simint_free_multi_shellpair(multi_sp);
free(multi_sp);
}
static void CMS_Simint_fill_multi_sp_list(
Simint_p simint, int npair, int *P_list, int *Q_list,
multi_sp_t multi_sp
)
{
// Put the original multi_shellpairs corresponding to the shell
// pairs (P_list[i], Q_list[i]) into the list
multi_sp_t Pin[_SIMINT_NSHELL_SIMD];
for (int ipair = 0; ipair < npair; ipair++)
{
int P = P_list[ipair];
int Q = Q_list[ipair];
Pin[ipair] = &simint->shellpairs[P * simint->nshell + Q];
}
// Reset output multi_sp and copy from existing multi_shellpairs.
// simint_cat_multi_shellpair() will check and allocate memory for output
multi_sp->nprim = 0;
simint_cat_shellpairs(
npair, (const struct simint_multi_shellpair **) Pin,
multi_sp, simint->screen_method
);
}
void CMS_Simint_calc_pair_Hcore(
BasisSet_p basis, Simint_p simint, int tid,
int A, int B, double **integrals, int *nint
)
{
int size, ret;
struct simint_shell *shells = simint->shells;
size = NCART(shells[A].am) * NCART(shells[B].am);
double *workbuf = &simint->workbuf[tid * simint->workmem_per_thread];
ret = simint_compute_ke(&shells[A], &shells[B], workbuf);
CMS_ASSERT(ret == 1);
double *output_buff = &simint->outbuf[tid * simint->outmem_per_thread];
ret = simint_compute_potential(
basis->natoms, basis->charge, basis->xn, basis->yn, basis->zn,
&shells[A], &shells[B], output_buff
);
CMS_ASSERT(ret == 1);
for (int i = 0; i < size; i++) output_buff[i] += workbuf[i];
*integrals = output_buff;
*nint = size;
}
void CMS_Simint_calc_pair_ovlp(
Simint_p simint, int tid, int A, int B,
double **integrals, int *nint
)
{
int size, ret;
struct simint_shell *shells = simint->shells;
double *output_buff = &simint->outbuf[tid*simint->outmem_per_thread];
ret = simint_compute_overlap(&shells[A], &shells[B], output_buff);
CMS_ASSERT(ret == 1);
size = NCART(shells[A].am) * NCART(shells[B].am);
*integrals = output_buff;
*nint = size;
}
void CMS_Simint_calc_shellquartet(
Simint_p simint, int tid, int M, int N,
int P, int Q, double **ERI, int *nint
)
{
double setup_start, setup_end, ostei_start, ostei_end;
if (tid == 0) setup_start = CMS_get_walltime_sec();
int nshell = simint->nshell;
multi_sp_t bra_pair = &simint->shellpairs[M * nshell + N];
multi_sp_t ket_pair = &simint->shellpairs[P * nshell + Q];
simint->num_multi_shellpairs[tid] += 1.0;
simint->sum_nprim[tid] += (double) ket_pair->nprim;
if (tid == 0)
{
setup_end = CMS_get_walltime_sec();
ostei_start = CMS_get_walltime_sec();
}
double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread];
double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread];
int ret = simint_compute_eri(
bra_pair, ket_pair, simint->screen_tol,
work_buff, output_buff
);
if (tid == 0) ostei_end = CMS_get_walltime_sec();
int ERI_size;
if (ret < 0)
{
ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized
} else {
CMS_ASSERT(ret == 1); // Single shell quartet
shell_t shells = simint->shells;
ERI_size = NCART(shells[M].am) * NCART(shells[N].am)
* NCART(shells[P].am) * NCART(shells[Q].am);
}
*ERI = output_buff;
*nint = ERI_size;
double *prim_screen_stat_info = *ERI + ERI_size;
simint->num_unscreened_prim[tid] += prim_screen_stat_info[0];
simint->num_screened_prim[tid] += prim_screen_stat_info[1];
simint->num_unscreened_vec[tid] += prim_screen_stat_info[2];
simint->num_screened_vec[tid] += prim_screen_stat_info[3];
if (tid == 0)
{
simint->ostei_setup += setup_end - setup_start;
simint->ostei_actual += ostei_end - ostei_start;
}
}
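/* Note on the output buffer layout (as assumed from the "+8" doubles reserved
in CMS_Simint_init): simint_compute_eri() writes the ERI values first, and the
four primitive/SIMD screening counters read via prim_screen_stat_info follow
them. */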
void CMS_Simint_calc_MNMN_shellquartet(
Simint_p simint, int tid, int M, int N,
void **multi_sp_, double **ERI, int *nint
)
{
double setup_start, setup_end, ostei_start, ostei_end;
if (tid == 0) setup_start = CMS_get_walltime_sec();
// No need to call simint_free_multi_shellpair() after use:
// Simint can reuse the allocated space next time if possible
multi_sp_t MN_pair = (multi_sp_t) *multi_sp_;
simint_create_multi_shellpair(
1, simint->shells + M, 1, simint->shells + N,
MN_pair, simint->screen_method
);
simint->num_multi_shellpairs[tid] += 1.0;
simint->sum_nprim[tid] += (double) MN_pair->nprim;
if (tid == 0)
{
setup_end = CMS_get_walltime_sec();
ostei_start = CMS_get_walltime_sec();
}
double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread];
double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread];
int ret = simint_compute_eri(
MN_pair, MN_pair, simint->screen_tol,
work_buff, output_buff
);
if (tid == 0) ostei_end = CMS_get_walltime_sec();
int ERI_size;
if (ret < 0)
{
ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized
} else {
CMS_ASSERT(ret == 1); // Single shell quartet
shell_t shells = simint->shells;
ERI_size = NCART(shells[M].am) * NCART(shells[N].am)
* NCART(shells[M].am) * NCART(shells[N].am);
}
*ERI = output_buff;
*nint = ERI_size;
double *prim_screen_stat_info = *ERI + ERI_size;
simint->num_unscreened_prim[tid] += prim_screen_stat_info[0];
simint->num_screened_prim[tid] += prim_screen_stat_info[1];
simint->num_unscreened_vec[tid] += prim_screen_stat_info[2];
simint->num_screened_vec[tid] += prim_screen_stat_info[3];
if (tid == 0)
{
simint->ostei_setup += setup_end - setup_start;
simint->ostei_actual += ostei_end - ostei_start;
}
}
void CMS_Simint_calc_shellquartet_batch(
Simint_p simint, int tid, int M, int N, int npair, int *P_list,
int *Q_list, double **batch_ERI, int *batch_nint, void **multi_sp_
)
{
double setup_start, setup_end, ostei_start, ostei_end;
if (tid == 0) setup_start = CMS_get_walltime_sec();
multi_sp_t bra_pair = &simint->shellpairs[M * simint->nshell + N];
multi_sp_t ket_pairs = (multi_sp_t) *multi_sp_;
CMS_Simint_fill_multi_sp_list(simint, npair, P_list, Q_list, ket_pairs);
simint->num_multi_shellpairs[tid] += 1.0;
simint->sum_nprim[tid] += (double) ket_pairs->nprim;
if (tid == 0)
{
setup_end = CMS_get_walltime_sec();
ostei_start = CMS_get_walltime_sec();
}
double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread];
double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread];
int ret = simint_compute_eri(
bra_pair, ket_pairs, simint->screen_tol,
work_buff, output_buff
);
if (tid == 0) ostei_end = CMS_get_walltime_sec();
int ERI_size;
if (ret <= 0)
{
ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized
} else {
CMS_ASSERT(ret == npair);
shell_t shells = simint->shells;
int P = P_list[0], Q = Q_list[0];
ERI_size = NCART(shells[M].am) * NCART(shells[N].am)
* NCART(shells[P].am) * NCART(shells[Q].am);
}
// Shells in P_list[] have the same AM and shells in Q_list[] have the same AM,
// so the result size for each quartet is the same
*batch_ERI = output_buff;
*batch_nint = ERI_size;
double *prim_screen_stat_info = *batch_ERI + ERI_size * npair;
simint->num_unscreened_prim[tid] += prim_screen_stat_info[0];
simint->num_screened_prim[tid] += prim_screen_stat_info[1];
simint->num_unscreened_vec[tid] += prim_screen_stat_info[2];
simint->num_screened_vec[tid] += prim_screen_stat_info[3];
if (tid == 0)
{
simint->ostei_setup += setup_end - setup_start;
simint->ostei_actual += ostei_end - ostei_start;
}
}
static void CMS_Simint_fill_DF_multi_sp_list(
Simint_p simint, int npair, int *P_list,
struct simint_multi_shellpair *multi_sp
)
{
// Put the original multi_shellpairs corresponding to the shell
// pairs (P_list[i], Q_list[i]) into the list
multi_sp_t Pin[_SIMINT_NSHELL_SIMD];
for (int ipair = 0; ipair < npair; ipair++)
{
int P = P_list[ipair];
Pin[ipair] = &simint->df_shellpairs[P];
}
// Reset output multi_sp and copy from existing multi_shellpairs.
// simint_cat_multi_shellpair() will check and allocate memory for output
multi_sp->nprim = 0;
simint_cat_shellpairs(
npair, (const struct simint_multi_shellpair **) Pin,
multi_sp, simint->screen_method
);
}
void CMS_Simint_calc_DF_shellpair(
Simint_p simint, int tid, int M, int N,
double **integrals, int *nint
)
{
double setup_start, setup_end, ostei_start, ostei_end;
if (tid == 0) setup_start = CMS_get_walltime_sec();
multi_sp_t bra_pair = &simint->df_shellpairs[M];
multi_sp_t ket_pair = &simint->df_shellpairs[N];
simint->num_multi_shellpairs[tid] += 1.0;
simint->sum_nprim[tid] += (double) ket_pair->nprim;
if (tid == 0)
{
setup_end = CMS_get_walltime_sec();
ostei_start = CMS_get_walltime_sec();
}
double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread];
double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread];
int ret = simint_compute_eri(
bra_pair, ket_pair, simint->screen_tol,
work_buff, output_buff
);
if (tid == 0) ostei_end = CMS_get_walltime_sec();
int ERI_size;
if (ret < 0)
{
ERI_size = 0; // Return zero ERI_size to caller; shell quartet is screened
} else {
CMS_ASSERT(ret == 1);
shell_t df_shells = simint->df_shells;
ERI_size = NCART(df_shells[M].am) * NCART(df_shells[N].am);
}
*integrals = output_buff;
*nint = ERI_size;
double *prim_screen_stat_info = *integrals + ERI_size;
simint->num_unscreened_prim[tid] += prim_screen_stat_info[0];
simint->num_screened_prim[tid] += prim_screen_stat_info[1];
simint->num_unscreened_vec[tid] += prim_screen_stat_info[2];
simint->num_screened_vec[tid] += prim_screen_stat_info[3];
if (tid == 0)
{
simint->ostei_setup += setup_end - setup_start;
simint->ostei_actual += ostei_end - ostei_start;
}
}
void CMS_Simint_calc_DF_shellquartet_batch(
Simint_p simint, int tid, int M, int N, int npair, int *P_list,
double **batch_ERI, int *batch_nint, void **multi_sp_
)
{
double setup_start, setup_end, ostei_start, ostei_end;
if (tid == 0) setup_start = CMS_get_walltime_sec();
multi_sp_t bra_pair = &simint->shellpairs[M * simint->nshell + N];
multi_sp_t ket_pairs = (multi_sp_t) *multi_sp_;
CMS_Simint_fill_DF_multi_sp_list(simint, npair, P_list, ket_pairs);
simint->num_multi_shellpairs[tid] += 1.0;
simint->sum_nprim[tid] += (double) ket_pairs->nprim;
if (tid == 0)
{
setup_end = CMS_get_walltime_sec();
ostei_start = CMS_get_walltime_sec();
}
double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread];
double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread];
int ret = simint_compute_eri(
bra_pair, ket_pairs, simint->screen_tol,
work_buff, output_buff
);
if (tid == 0) ostei_end = CMS_get_walltime_sec();
int ERI_size;
if (ret <= 0)
{
ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized
} else {
CMS_ASSERT(ret == npair);
shell_t shells = simint->shells;
shell_t df_shells = simint->df_shells;
int P = P_list[0];
ERI_size = NCART(shells[M].am) * NCART(shells[N].am) * NCART(df_shells[P].am);
}
// Shells in P_list[] have the same AM (and the bra pair (M,N) is fixed),
// so the result size for each (MN|P) batch member is the same
*batch_ERI = output_buff;
*batch_nint = ERI_size;
double *prim_screen_stat_info = *batch_ERI + ERI_size * npair;
simint->num_unscreened_prim[tid] += prim_screen_stat_info[0];
simint->num_screened_prim[tid] += prim_screen_stat_info[1];
simint->num_unscreened_vec[tid] += prim_screen_stat_info[2];
simint->num_screened_vec[tid] += prim_screen_stat_info[3];
if (tid == 0)
{
simint->ostei_setup += setup_end - setup_start;
simint->ostei_actual += ostei_end - ostei_start;
}
}
void CMS_Simint_add_accF_timer(Simint_p simint, double sec)
{
simint->fock_update_F += sec;
}
void CMS_Simint_reset_stat_info(Simint_p simint)
{
int stat_info_size = sizeof(double) * simint->nthread;
memset(simint->num_multi_shellpairs, 0, stat_info_size);
memset(simint->sum_nprim, 0, stat_info_size);
memset(simint->num_screened_prim, 0, stat_info_size);
memset(simint->num_unscreened_prim, 0, stat_info_size);
memset(simint->num_screened_vec, 0, stat_info_size);
memset(simint->num_unscreened_vec, 0, stat_info_size);
}
|
lostintranslation.h | #include <omp.h>
#include <fstream>
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>
#include <sys/types.h>
#include <sys/stat.h>
#include <math.h>
#include <limits.h>
#include <bitset>
#include <map>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <ctype.h>
#include <sstream>
#include <set>
#include <memory>
#include <typeinfo>
using namespace std;
/* PAF format: https://github.com/lh3/miniasm/blob/master/PAF.md
* column seq name
* column seq length
* column seq start
* column seq end
* strand (+/-)
* row seq name
* row seq length
* row seq start
* row seq end
* number of residue matches (alignment score) ---> GGGG: I set this to 0 if missing
* alignment block length (overlap length) ---> GGGG: I compute this if missing
* mapping quality (0-255; 255 for missing) */
/* If PAF is generated from an alignment, column 10 equals the number of sequence matches, and column 11 equals the total
number of sequence matches, mismatches, insertions and deletions in the alignment. If alignment is not available,
column 10 and 11 are still required but may be highly inaccurate. */
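/* An illustrative PAF line (tab-separated; names and values are made up):
read1 12000 100 11900 + read2 13000 500 12400 9500 11900 255 */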
//=======================================================================
//
// Common functions
//
//=======================================================================
int estimate (int begpV, int endpV, int lenV, int begpH, int endpH, int lenH)
{
int diffV = endpV - begpV;
int diffH = endpH - begpH;
int minL = min(begpV, begpH);
int minR = min(lenV - endpV, lenH - endpH);
int ovlen = minL + minR + (diffV + diffH)/2;
return ovlen;
}
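/* Worked example (illustrative): begpV=100, endpV=900, lenV=1000, begpH=0,
endpH=800, lenH=900 gives diffV=800, diffH=800, minL=min(100,0)=0,
minR=min(100,100)=100, hence ovlen = 0 + 100 + (800+800)/2 = 900. */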
vector<std::string> split (const std::string &s, char delim)
{
std::vector<std::string> result;
std::stringstream ss (s);
std::string item;
while (std::getline (ss, item, delim))
{
result.push_back (item);
}
return result;
}
/* from mecat index file to a map<uint32_t,string> : map<index,read-name> */
void mecatidx (ifstream& idx2read, map<uint32_t, std::string>& names)
{
string num, name, seq;
uint32_t idx;
if(idx2read.is_open())
{
string line;
while(getline(idx2read, line))
{
stringstream linestream(line);
getline(linestream, num, ' ' );
getline(linestream, name, ' ' );
/* sequence on new line */
getline(idx2read, seq);
idx = stoi(num);
/* remove first char'>' */
name.erase(0, 1);
names.insert(std::make_pair(idx, name));
}
cout << "MECAT idx2read table created" << endl;
}
else
{
cout << "Error creating names table from idx2read" << endl;
exit(1);
}
}
/* from mecat numeric id to read name */
std::string idx2read(uint32_t idx, map<uint32_t, std::string>& names)
{
map<uint32_t, std::string>::iterator it;
string name;
it = names.find(idx);
if(it != names.end())
return it->second;
else
{
cout << "Read " << idx << " not present in MECAT output" << endl;
exit(1);
}
}
//=======================================================================
//
// BELLA to PAF (BELLA directly outputs in PAF format if run with -p)
//
//=======================================================================
void BELLA2PAF(ifstream& input, char* filename)
{
int maxt = 1;
#pragma omp parallel
{
maxt = omp_get_num_threads();
}
uint64_t numoverlap = std::count(std::istreambuf_iterator<char>(input), std::istreambuf_iterator<char>(), '\n');
input.seekg(0, std::ios_base::beg);
vector<std::string> entries;
vector<std::stringstream> local(maxt);
/* read input file */
if(input)
for (int i = 0; i < numoverlap; ++i)
{
std::string line;
std::getline(input, line);
entries.push_back(line);
}
input.close();
/* transform BELLA output in PAF format */
#pragma omp parallel for
for(uint64_t i = 0; i < numoverlap; i++)
{
int ithread = omp_get_thread_num();
/* BELLA format: cname, rname, numkmer, score, ovlen, rev, cstart, cend, clen, rstart, rend, rlen */
std::vector<std::string> v = split (entries[i], '\t');
/* improve readability */
std::string& nameV = v[0];
std::string& nameH = v[1];
std::string& isRev = v[5];
int kmers = std::stoi(v[2]);
int score = std::stoi(v[3]);
int ovlen = std::stoi(v[4]);
int begpV = std::stoi(v[6]);
int endpV = std::stoi(v[7]);
int lengV = std::stoi(v[8]);
int begpH = std::stoi(v[9]);
int endpH = std::stoi(v[10]);
int lengH = std::stoi(v[11]);
/* change strand formatting */
if(isRev == "n") isRev = "+";
else isRev = "-";
// GG: sequence divergence estimation
int overlapLenV = endpV - begpV;
int overlapLenH = endpH - begpH;
int normLen = max(overlapLenV, overlapLenH);
int size = 17;
float matchRate = (float)kmers / (float)normLen;
float seqDivergence = std::log(1 / matchRate) / size;
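// Editorial note: this is a k-mer based divergence estimate in the spirit of
// Mash distance, with the k-mer length hard-coded as size = 17 above; fewer
// shared k-mers per aligned base translate into a higher per-base divergence.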
// /* compute overlap length if missing (begpV, endpV, lenV, begpH, endpH, lenH) */
// int ovlen = estimate (stoi(begpV), stoi(endpV), stoi(lengV), stoi(begpH), stoi(endpH), stoi(lengH));
local[ithread] << nameV << "\t" << lengV << "\t" << begpV << "\t" << endpV << "\t" << isRev
<< "\t" << nameH << "\t" << lengH << "\t" << begpH << "\t" << endpH << "\t" << (int)(normLen * seqDivergence)
<< "\t" << normLen << "\t255" << endl;
}
/* write to a new file */
int64_t * bytes = new int64_t[maxt];
for(int i = 0; i < maxt; ++i)
{
local[i].seekg(0, ios::end);
bytes[i] = local[i].tellg();
local[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes + maxt, static_cast<int64_t>(0));
std::ofstream output(filename, std::ios::binary | std::ios::app);
#ifdef PRINT
cout << "Creating or appending to output file with " << (double)bytestotal/(double)(1024 * 1024) << " MB" << endl;
#endif
output.seekp(bytestotal - 1);
/* this will likely create a sparse file so the actual disks won't spin yet */
output.write("", 1);
output.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
/* then everyone fills it */
if ((ffinal = fopen(filename, "rb+")) == NULL)
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
int64_t bytesuntil = std::accumulate(bytes, bytes + ithread, static_cast<int64_t>(0));
fseek (ffinal , bytesuntil , SEEK_SET);
std::string text = local[ithread].str();
fwrite(text.c_str(),1, bytes[ithread], ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
}
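/* Note on the parallel write pattern above (shared by every converter in this
file): each thread buffers its lines in a stringstream, the per-thread byte
counts form an exclusive prefix sum, the output file is extended to its final
size up front, and then every thread reopens the file and fwrite()s its own
chunk at its own offset, so no two threads touch overlapping byte ranges. */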
//=======================================================================
//
// MHAP to PAF
//
//=======================================================================
void MHAP2PAF(ifstream& input, char* filename)
{
int maxt = 1;
#pragma omp parallel
{
maxt = omp_get_num_threads();
}
uint64_t numoverlap = std::count(std::istreambuf_iterator<char>(input), std::istreambuf_iterator<char>(), '\n');
input.seekg(0, std::ios_base::beg);
vector<std::string> entries;
vector<std::stringstream> local(maxt);
/* read input file */
if(input)
for (int i = 0; i < numoverlap; ++i)
{
std::string line;
std::getline(input, line);
entries.push_back(line);
}
input.close();
/* transform MHAP output in PAF format */
#pragma omp parallel for
for(uint64_t i = 0; i < numoverlap; i++)
{
int ithread = omp_get_thread_num();
/* MHAP format: cname, rname, err, nkmer, cstrand, cstart, cend, clen, rstrand, rstart, rend, rlen */
std::vector<std::string> v = split (entries[i], ' ');
/* improve readability */
std::string& nameV = v[0];
std::string& nameH = v[1];
std::string& begpV = v[5];
std::string& endpV = v[6];
std::string& lengV = v[7];
std::string& isRev = v[8];
std::string& begpH = v[9];
std::string& endpH = v[10];
std::string& lengH = v[11];
/* change strand formatting */
if(isRev == "0") isRev = "+";
else isRev = "-";
/* compute overlap length if missing (begpV, endpV, lenV, begpH, endpH, lenH) */
int ovlen = estimate (stoi(begpV), stoi(endpV), stoi(lengV), stoi(begpH), stoi(endpH), stoi(lengH));
/* GGGG: If alignment is missing I estimate it as % of the overlap length and I determine that % using the error rate */
// GGGG: Error rate is now hard-coded, need to be an input parameter
float error = 0.15;
float identity = (1-error)*(1-error);
int score = floor(identity*ovlen);
local[ithread] << nameV << "\t" << lengV << "\t" << begpV << "\t" << endpV << "\t" << isRev
<< "\t" << nameH << "\t" << lengH << "\t" << begpH << "\t" << endpH << "\t" << score
<< "\t" << ovlen << "\t255" << endl;
}
/* write to a new file */
int64_t * bytes = new int64_t[maxt];
for(int i = 0; i < maxt; ++i)
{
local[i].seekg(0, ios::end);
bytes[i] = local[i].tellg();
local[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes + maxt, static_cast<int64_t>(0));
std::ofstream output(filename, std::ios::binary | std::ios::app);
#ifdef PRINT
cout << "Creating or appending to output file with " << (double)bytestotal/(double)(1024 * 1024) << " MB" << endl;
#endif
output.seekp(bytestotal - 1);
/* this will likely create a sparse file so the actual disks won't spin yet */
output.write("", 1);
output.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
/* then everyone fills it */
if ((ffinal = fopen(filename, "rb+")) == NULL)
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
int64_t bytesuntil = std::accumulate(bytes, bytes + ithread, static_cast<int64_t>(0));
fseek (ffinal , bytesuntil , SEEK_SET);
std::string text = local[ithread].str();
fwrite(text.c_str(),1, bytes[ithread], ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
}
//=======================================================================
//
// MECAT to PAF
//
//=======================================================================
void MECAT2PAF(ifstream& input, char* filename, ifstream& index)
{
map<uint32_t, std::string> names;
mecatidx (index, names);
int maxt = 1;
#pragma omp parallel
{
maxt = omp_get_num_threads();
}
uint64_t numoverlap = std::count(std::istreambuf_iterator<char>(input), std::istreambuf_iterator<char>(), '\n');
input.seekg(0, std::ios_base::beg);
vector<std::string> entries;
vector<std::stringstream> local(maxt);
/* read input file */
if(input)
for (int i = 0; i < numoverlap; ++i)
{
std::string line;
std::getline(input, line);
entries.push_back(line);
}
input.close();
/* transform MECAT output in PAF format */
#pragma omp parallel for
for(uint64_t i = 0; i < numoverlap; i++)
{
std::stringstream linestream(entries[i]);
int ithread = omp_get_thread_num();
/* MECAT format: cid, rid, score, id, cstr, cstart, cend, clen, rstr, rstart, rend, rlen */
std::vector<std::string> v = split (entries[i], '\t');
/* mecat idx to nametag translation */
std::string nameV = idx2read (stoi(v[0]), names);
std::string nameH = idx2read (stoi(v[1]), names);
std::string& ident = v[2];
std::string& begpV = v[5];
std::string& endpV = v[6];
std::string& lengV = v[7];
std::string& isRev = v[8];
std::string& begpH = v[9];
std::string& endpH = v[10];
std::string& lengH = v[11];
/* change strand formatting */
if(isRev == "0") isRev = "+";
else isRev = "-";
/* compute overlap length if missing (begpV, endpV, lenV, begpH, endpH, lenH) */
int ovlen = estimate (stoi(begpV), stoi(endpV), stoi(lengV), stoi(begpH), stoi(endpH), stoi(lengH));
/* If alignment is missing I estimate it as (ident) *ovlen */
int score = floor((stod(ident)*ovlen) / 100);
/* GGGG: I might need to translate back idx to original names ---> YES, I NEED THE ORIGINAL NAMES. */
local[ithread] << nameV << "\t" << lengV << "\t" << begpV << "\t" << endpV << "\t" << isRev
<< "\t" << nameH << "\t" << lengH << "\t" << begpH << "\t" << endpH << "\t" << score
<< "\t" << ovlen << "\t255" << endl;
}
/* write to a new file */
int64_t * bytes = new int64_t[maxt];
for(int i = 0; i < maxt; ++i)
{
local[i].seekg(0, ios::end);
bytes[i] = local[i].tellg();
local[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes + maxt, static_cast<int64_t>(0));
std::ofstream output(filename, std::ios::binary | std::ios::app);
#ifdef PRINT
cout << "Creating or appending to output file with " << (double)bytestotal/(double)(1024 * 1024) << " MB" << endl;
#endif
output.seekp(bytestotal - 1);
/* this will likely create a sparse file so the actual disks won't spin yet */
output.write("", 1);
output.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
/* then everyone fills it */
if ((ffinal = fopen(filename, "rb+")) == NULL)
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
int64_t bytesuntil = std::accumulate(bytes, bytes + ithread, static_cast<int64_t>(0));
fseek (ffinal , bytesuntil , SEEK_SET);
std::string text = local[ithread].str();
fwrite(text.c_str(),1, bytes[ithread], ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
}
//=======================================================================
//
// BLASR to PAF
//
//=======================================================================
void BLASR2PAF(ifstream& input, char* filename)
{
int maxt = 1;
#pragma omp parallel
{
maxt = omp_get_num_threads();
}
uint64_t numoverlap = std::count(std::istreambuf_iterator<char>(input), std::istreambuf_iterator<char>(), '\n');
input.seekg(0, std::ios_base::beg);
vector<std::string> entries;
vector<std::stringstream> local(maxt);
/* read input file */
if(input)
for (int i = 0; i < numoverlap; ++i)
{
std::string line;
std::getline(input, line);
entries.push_back(line);
}
input.close();
/* transform BLASR output in PAF format */
#pragma omp parallel for
for(uint64_t i = 0; i < numoverlap; i++)
{
int ithread = omp_get_thread_num();
/* BLASR format: cname, rname, score, id, cstr, cstart, cend, clen, rstr, rstart, rend, rlen, qv */
std::vector<std::string> v = split (entries[i], ' ');
/* improve readability */
std::string& nameV = v[0];
std::string& nameH = v[1];
std::string& score = v[2];
std::string& strnV = v[4];
std::string& begpV = v[5];
std::string& endpV = v[6];
std::string& lengV = v[7];
std::string& strnH = v[8];
std::string& begpH = v[9];
std::string& endpH = v[10];
std::string& lengH = v[11];
std::string& mapQV = v[12];
/* change strand formatting */
std::string isRev;
if(strnH == strnV) isRev = "+";
else isRev = "-";
// GGGG: BLASR scores are negative? Dig into this.
score.erase(0, 1);
/* compute overlap length if missing (begpV, endpV, lenV, begpH, endpH, lenH) */
int ovlen = estimate (stoi(begpV), stoi(endpV), stoi(lengV), stoi(begpH), stoi(endpH), stoi(lengH));
local[ithread] << nameV << "\t" << lengV << "\t" << begpV << "\t" << endpV << "\t" << isRev
<< "\t" << nameH << "\t" << lengH << "\t" << begpH << "\t" << endpH << "\t" << score
<< "\t" << ovlen << "\t" << mapQV << endl;
}
/* write to a new file */
int64_t * bytes = new int64_t[maxt];
for(int i = 0; i < maxt; ++i)
{
local[i].seekg(0, ios::end);
bytes[i] = local[i].tellg();
local[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes + maxt, static_cast<int64_t>(0));
std::ofstream output(filename, std::ios::binary | std::ios::app);
#ifdef PRINT
cout << "Creating or appending to output file with " << (double)bytestotal/(double)(1024 * 1024) << " MB" << endl;
#endif
output.seekp(bytestotal - 1);
/* this will likely create a sparse file so the actual disks won't spin yet */
output.write("", 1);
output.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
/* then everyone fills it */
if ((ffinal = fopen(filename, "rb+")) == NULL)
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
int64_t bytesuntil = std::accumulate(bytes, bytes + ithread, static_cast<int64_t>(0));
fseek (ffinal , bytesuntil , SEEK_SET);
std::string text = local[ithread].str();
fwrite(text.c_str(),1, bytes[ithread], ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
}
//=======================================================================
//
// DALIGNER (translated in BELLA format) to PAF
//
//=======================================================================
void DALIGNER2PAF(ifstream& input, char* filename)
{
int maxt = 1;
#pragma omp parallel
{
maxt = omp_get_num_threads();
}
uint64_t numoverlap = std::count(std::istreambuf_iterator<char>(input), std::istreambuf_iterator<char>(), '\n');
input.seekg(0, std::ios_base::beg);
vector<std::string> entries;
vector<std::stringstream> local(maxt);
/* read input file */
if(input)
for (int i = 0; i < numoverlap; ++i)
{
std::string line;
std::getline(input, line);
entries.push_back(line);
}
input.close();
/* transform DALIGNER output in PAF format */
#pragma omp parallel for
for(uint64_t i = 0; i < numoverlap; i++)
{
int ithread = omp_get_thread_num();
/* DALIGNER format: cname, rname, rev, cstart, cend, clen, rstart, rend, rlen */
std::vector<std::string> v = split (entries[i], ' ');
/* improve readability */
std::string& nameV = v[0];
std::string& nameH = v[1];
std::string& isRev = v[2];
std::string& begpV = v[3];
std::string& endpV = v[4];
std::string& lengV = v[5];
std::string& begpH = v[6];
std::string& endpH = v[7];
std::string& lengH = v[8];
/* change strand formatting */
if(isRev == "n") isRev = "+";
else isRev = "-";
/* compute overlap length if missing (begpV, endpV, lenV, begpH, endpH, lenH) */
int ovlen = estimate (stoi(begpV), stoi(endpV), stoi(lengV), stoi(begpH), stoi(endpH), stoi(lengH));
/* GGGG: If alignment is missing I estimate it as % of the overlap length and I determine that % using the error rate */
// GGGG: Error rate is now hard-coded, need to be an input parameter
float error = 0.15;
float identity = (1-error)*(1-error);
int score = floor(identity*ovlen);
local[ithread] << nameV << "\t" << lengV << "\t" << begpV << "\t" << endpV << "\t" << isRev
<< "\t" << nameH << "\t" << lengH << "\t" << begpH << "\t" << endpH << "\t" << score
<< "\t" << ovlen << "\t255" << endl;
}
/* write to a new file */
int64_t * bytes = new int64_t[maxt];
for(int i = 0; i < maxt; ++i)
{
local[i].seekg(0, ios::end);
bytes[i] = local[i].tellg();
local[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes + maxt, static_cast<int64_t>(0));
std::ofstream output(filename, std::ios::binary | std::ios::app);
#ifdef PRINT
cout << "Creating or appending to output file with " << (double)bytestotal/(double)(1024 * 1024) << " MB" << endl;
#endif
output.seekp(bytestotal - 1);
/* this will likely create a sparse file so the actual disks won't spin yet */
output.write("", 1);
output.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
/* then everyone fills it */
if ((ffinal = fopen(filename, "rb+")) == NULL)
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
int64_t bytesuntil = std::accumulate(bytes, bytes + ithread, static_cast<int64_t>(0));
fseek (ffinal , bytesuntil , SEEK_SET);
std::string text = local[ithread].str();
fwrite(text.c_str(),1, bytes[ithread], ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
}
|
mandelbrot_omp.c | # include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <time.h>
int main ( )
{
int m = 1000;
int n = 1000;
struct timespec ts_start, ts_end;
float time_total;
int c;
int count_max = 1000;
int i, j, k;
int jhi, jlo;
char *output_filename = "mandelbrot.ppm";
FILE *output_unit;
int **r, **g, **b, **count;
double x_max = 1.25;
double x_min = - 2.25;
double x, x1, x2;
double y_max = 1.75;
double y_min = - 1.75;
double y, y1, y2;
int i4_min ( int i1, int i2 );
r=(int**)malloc(m*sizeof(int*));
for(i=0;i<m;i++)
r[i]=malloc(n*sizeof(int));
g=(int**)malloc(m*sizeof(int*));
for(i=0;i<m;i++)
g[i]=malloc(n*sizeof(int));
b=(int**)malloc(m*sizeof(int*));
for(i=0;i<m;i++)
b[i]=malloc(n*sizeof(int));
count=(int**)malloc(m*sizeof(int*));
for(i=0;i<m;i++)
count[i]=malloc(n*sizeof(int));
clock_gettime(CLOCK_MONOTONIC, &ts_start);
/* Carry out the iteration for each pixel, determining COUNT. */
#pragma omp parallel for shared ( b, count, count_max, g, r, x_max, x_min, y_max, y_min ) \
private ( c, i, j, k, x, x1, x2, y, y1, y2 )
for ( i = 0; i < m; i++ )
{
y = (i*y_max + (m-1-i)*y_min)/(m-1); /* map row i in [0,m-1] onto [y_min,y_max] */
for ( j = 0; j < n; j++ )
{
x = (j*x_max + (n-1-j)*x_min)/(n-1); /* map column j in [0,n-1] onto [x_min,x_max] */
count[i][j] = 0;
x1 = x;
y1 = y;
for ( k = 1; k <= count_max; k++ )
{
x2 = x1*x1 - y1*y1 + x;
y2 = 2*x1*y1 + y;
if (x2 < -2.0 || 2.0 < x2 || y2 < -2.0 || 2.0 < y2 )
{
count[i][j] = k;
break;
}
x1 = x2;
y1 = y2;
}
/* If count stays 0 the point never escaped, i.e. it is taken to be in the set */
if ( ( count[i][j] % 2 ) == 1 )
{
r[i][j] = 255;
g[i][j] = 255;
b[i][j] = 255;
}
else
{
/* Guard count == 0: log(0) is undefined and would corrupt the pixel value */
if ( count[i][j] == 0 )
c = 0;
else
c = (int) (255.0*(1-log((double)count[i][j])/log((double)count_max)));
r[i][j] = c;
g[i][j] = c;
b[i][j] = c;
}
}
}
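/* The loop above is the standard escape-time algorithm: iterate
z_{k+1} = z_k * z_k + c with z_1 = c (x1,y1 hold the real/imaginary parts of
z_k and x,y hold c) and record the first k at which the iterate leaves the box
[-2,2] x [-2,2]; points that never leave within count_max iterations keep
count 0 and are treated as members of the set. */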
clock_gettime(CLOCK_MONOTONIC, &ts_end);
time_total = (ts_end.tv_sec - ts_start.tv_sec)*1e9 + (ts_end.tv_nsec - ts_start.tv_nsec);
printf("\nTotal time is %f ms", time_total/1000000);
/* Write data to an ASCII PPM file. */
output_unit = fopen ( output_filename, "wt" );
fprintf ( output_unit, "P3\n" );
fprintf ( output_unit, "%d %d\n", n, m );
fprintf ( output_unit, "%d\n", 255 );
for ( i = 0; i < m; i++ )
{
for ( jlo = 0; jlo < n; jlo = jlo + 4 )
{
jhi = i4_min ( jlo + 4, n );
for ( j = jlo; j < jhi; j++ )
{
fprintf ( output_unit, " %d %d %d", r[i][j], g[i][j], b[i][j] );
}
fprintf ( output_unit, "\n" );
}
}
fclose ( output_unit );
printf ( "\n" );
printf ( " Graphics data written to \"%s\".\n", output_filename );
printf ( "\n" );
return 0;
}
int i4_min ( int i1, int i2 )
{
int value;
if ( i1 < i2 )
value = i1;
else
value = i2;
return value;
}
|
efs_fmt_plug.c | /* EFS cracker. Hacked together during 2013 monsoons by Dhiru Kholia
* <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for the algorithm go to "dpapick" project,
* https://bitbucket.org/jmichel/dpapick
*
* Hash Format ==> $efs$version$SID$iv$iterations$ciphertext
*/
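/* For illustration, the first self-test vector below splits into these
 * fields: version = 0, SID = S-1-5-21-1482476501-1659004503-725345543-1003,
 * iv = b3d62a0b06cecc236fe3200460426a13 (16 bytes), iterations = 4000, and
 * the remainder is the hex-encoded ciphertext. */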
#if FMT_EXTERNS_H
extern struct fmt_main fmt_efs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_efs);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "memory.h"
#include "options.h"
#include "unicode.h"
#include "sha.h"
#include "gladman_hmac.h"
#include "sse-intrinsics.h"
#define EFS_CRAP_LOGIC
#include "pbkdf2_hmac_sha1.h"
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#ifdef MMX_COEF
#define SHA1_BLK (SHA1_SSE_PARA * MMX_COEF)
#endif
#define FORMAT_LABEL "EFS"
#define FORMAT_TAG "$efs$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_NAME ""
#ifdef MMX_COEF_SHA512
#define ALGORITHM_NAME "PBKDF2-SHA1-efs-variant 3DES " SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1-efs-variant 3DES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define MAX_CT_LEN 4096
#define MAX_IV_LEN 16
#define MAX_SID_LEN 1024
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT SHA1_BLK
#define MAX_KEYS_PER_CRYPT SHA1_BLK
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests efs_tests[] = {
/* Windows XP, openwall.efs */
{"$efs$0$S-1-5-21-1482476501-1659004503-725345543-1003$b3d62a0b06cecc236fe3200460426a13$4000$d3841257348221cd92caf4427a59d785ed1474cab3d0101fc8d37137dbb598ff1fd2455826128b2594b846934c073528f8648d750d3c8e6621e6f706d79b18c22f172c0930d9a934de73ea2eb63b7b44810d332f7d03f14d1c153de16070a5cab9324da87405c1c0", "openwall"},
/* Windows XP, openwall.efs.2 */
{"$efs$0$S-1-5-21-1482476501-1659004503-725345543-1005$c9cbd491f78ea6d512276b33f025bce8$4000$091a13443cfc2ddb16dcf256ab2a6707a27aa22b49a9a9011ebf3bb778d0088c2896de31de67241d91df75306e56f835337c89cfb2f9afa940b4e7e019ead2737145032fac0bb34587a707d42da7e00b72601a730f5c848094d54c47c622e2f8c8d204c80ad061be", "JtRisthebest"},
{NULL}
};
#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned char iv[MAX_IV_LEN];
int ivlen;
int iterations;
int ctlen;
int version; // for future expansion
unsigned char ct[MAX_CT_LEN];
UTF16 SID[MAX_SID_LEN+1];
} *cur_salt;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_tiny(sizeof(*saved_key) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
cracked = mem_calloc_tiny(sizeof(*cracked) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int ishex(char *q)
{
while (atoi16[ARCH_INDEX(*q)] != 0x7F)
q++;
return !*q;
}
static int isdec(char *q)
{
char buf[24];
int x = atoi(q);
sprintf(buf, "%d", x);
return !strcmp(q,buf) && *q != '-';
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtok(ctcopy, "$")) == NULL) /* version number */
goto err;
if(!isdec(p))
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* SID */
goto err;
if (strlen(p) > MAX_SID_LEN)
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* iv */
goto err;
if (strlen(p) > MAX_IV_LEN * 2 || (strlen(p)&1)) /* iv length */
goto err;
if (!ishex(p))
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* iterations */
goto err;
if(!isdec(p))
goto err;
if ((p = strtok(NULL, "$")) == NULL) /* data */
goto err;
if (strlen(p) > MAX_CT_LEN * 2 || (strlen(p)&1))
goto err;
if (!ishex(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
int length;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += TAG_LENGTH; // skip over "$efs$"
p = strtok(ctcopy, "$");
cs.version = atoi(p);
p = strtok(NULL, "$");
// Convert SID to Unicode
enc_to_utf16(cs.SID, MAX_SID_LEN, (UTF8*)p, strlen(p));
p = strtok(NULL, "$");
length = strlen(p) / 2;
cs.ivlen = length;
for (i = 0; i < cs.ivlen; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtok(NULL, "$");
cs.iterations = atoi(p);
p = strtok(NULL, "$");
length = strlen(p) / 2;
cs.ctlen = length;
for (i = 0; i < cs.ctlen; i++)
cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *pwdhash, unsigned char *data)
{
unsigned char out[MAX_CT_LEN+16];
unsigned char *hmacSalt;
unsigned char *ourKey;
unsigned char *hmac;
unsigned char hmacComputed[20];
unsigned char encKey[20];
DES_cblock key1, key2, key3;
DES_cblock ivec;
DES_key_schedule ks1, ks2, ks3;
memset(out, 0, sizeof(out));
memcpy(key1, key, 8);
memcpy(key2, key + 8, 8);
memcpy(key3, key + 16, 8);
DES_set_key((C_Block *) key1, &ks1);
DES_set_key((C_Block *) key2, &ks2);
DES_set_key((C_Block *) key3, &ks3);
memcpy(ivec, iv, 8);
DES_ede3_cbc_encrypt(data, out, cur_salt->ctlen, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
// self.key = cleartxt[-64:]
// self.hmacSalt = cleartxt[:16]
// self.hmac = cleartxt[16:16+self.hashAlgo.digestLength]
// self.hmacComputed = crypto.DPAPIHmac(self.hashAlgo, pwdhash,
// self.hmacSalt, self.key)
// self.decrypted = self.hmac == self.hmacComputed
ourKey = out + cur_salt->ctlen - 64;
hmacSalt = out; // 16 bytes
hmac = out + 16;
// encKey = hmac.new(pwdhash, hmacSalt, dg).digest()
hmac_sha1(pwdhash, 32, hmacSalt, 16, encKey, 20);
// return hmac.new(encKey, value, dg).digest()
// dump_stuff(key, 64);
hmac_sha1(encKey, 20, ourKey, 64, hmacComputed, 20);
// dump_stuff(hmac, 20);
return memcmp(hmac, hmacComputed, 20);
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#ifdef MMX_COEF
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT];
union {
ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
unsigned char *poutc;
} x;
#endif
// def derivePassword(userPwd, userSID, hashAlgo)
// Computes the encryption key from a user's password
// return derivePwdHash(hashlib.sha1(userPwd.encode("UTF-16LE")).digest()
unsigned char *passwordBuf;
int passwordBufSize;
unsigned char out[MAX_KEYS_PER_CRYPT][32];
unsigned char out2[MAX_KEYS_PER_CRYPT][32];
SHA_CTX ctx;
int i;
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
passwordBuf = (unsigned char*)saved_key[index+i];
passwordBufSize = strlen16((UTF16*)passwordBuf) * 2;
/* hash the UTF-16LE encoded key */
SHA1_Init(&ctx);
SHA1_Update(&ctx, passwordBuf, passwordBufSize);
SHA1_Final(out[i], &ctx);
// 2. use UTF-16LE encoded SID in HMAC
passwordBuf = (unsigned char*)cur_salt->SID;
passwordBufSize = (strlen16(cur_salt->SID) + 1) * 2;
hmac_sha1(out[i], 20, passwordBuf, passwordBufSize, out2[i], 20);
#ifdef MMX_COEF
lens[i] = 20;
pin[i] = (unsigned char*)out2[i];
x.pout[i] = (ARCH_WORD_32*)(out[i]);
#endif
}
#ifdef MMX_COEF
pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->iv, 16, cur_salt->iterations, &(x.poutc), 32, 0);
#else
pbkdf2_sha1(out2[0], 20, cur_salt->iv, 16, cur_salt->iterations, out[0], 32, 0);
#endif
// kcdecrypt will use 32 bytes, we only initialized 20 so far
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
unsigned char iv[8];
memset(out2[i] + 20, 0, 32 - 20);
// split derived key into "key" and IV
memcpy(iv, out[i] + 24, 8);
if (kcdecrypt(out[i], iv, out2[i], cur_salt->ct) == 0)
cracked[index+i] = 1;
else
cracked[index+i] = 0;
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void efs_set_key(char *key, int index)
{
/* Convert key to UTF-16LE (--encoding aware) */
enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
}
static char *get_key(int index)
{
return (char*)utf16_to_enc(saved_key[index]);
}
#if FMT_MAIN_VERSION > 11
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
#endif
struct fmt_main fmt_efs = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8,
#if FMT_MAIN_VERSION > 11
{
"iteration count",
},
#endif
efs_tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{
iteration_count,
},
#endif
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
set_salt,
efs_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
billownoise.h | #pragma once
#ifndef BILLOW_NOISE_H
#define BILLOW_NOISE_H
#include "noisecommon.h"
#define DEFAULT_BILLOW_FREQUENCY 1.0
#define DEFAULT_BILLOW_LACUNARITY 2.0
#define DEFAULT_BILLOW_PERSISTENCE 0.5
#define DEFAULT_BILLOW_OCTAVE_COUNT 6
#define DEFAULT_BILLOW_SEED 0
#define DEFAULT_BILLOW_POSITION_X 0.0
#define DEFAULT_BILLOW_POSITION_Y 0.0
#define DEFAULT_BILLOW_POSITION_Z 0.0
#define DEFAULT_BILLOW_STEP 0.01
#define DEFAULT_BILLOW_PARALLEL false
#define DEFAULT_BILLOW_QUALITY QUALITY_STANDARD
struct BillowNoise {
float frequency;
float lacunarity;
float persistence;
unsigned char octave_count;
int seed;
float position[3];
float step;
bool parallel;
float *(*billow_func)(struct BillowNoise *, size_t, size_t, size_t);
enum NoiseQuality noise_quality;
};
static inline float *billow_noise_eval_1d(struct BillowNoise *billow_noise, size_t x_size);
static inline float *billow_noise_eval_2d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size);
static inline float *billow_noise_eval_3d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float billow_noise_eval_3d_single(struct BillowNoise *billow_noise, float x_pos, float y_pos, float z_pos);
static inline float *billow_noise_eval_3d_fallback(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_sse2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_sse4_1(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx512(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline void billow_noise_init(struct BillowNoise *billow_noise) {
billow_noise->frequency = DEFAULT_BILLOW_FREQUENCY;
billow_noise->lacunarity = DEFAULT_BILLOW_LACUNARITY;
billow_noise->persistence = DEFAULT_BILLOW_PERSISTENCE;
billow_noise->octave_count = DEFAULT_BILLOW_OCTAVE_COUNT;
billow_noise->seed = DEFAULT_BILLOW_SEED;
billow_noise->noise_quality = DEFAULT_BILLOW_QUALITY;
billow_noise->position[0] = DEFAULT_BILLOW_POSITION_X;
billow_noise->position[1] = DEFAULT_BILLOW_POSITION_Y;
billow_noise->position[2] = DEFAULT_BILLOW_POSITION_Z;
billow_noise->step = DEFAULT_BILLOW_STEP;
billow_noise->parallel = DEFAULT_BILLOW_PARALLEL;
switch (detect_simd_support()) {
#ifdef ARCH_32_64
case SIMD_AVX512F:
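// billow_noise_eval_3d_avx512 is declared above but not defined in this
// header, so the AVX-512 case uses the scalar fallback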
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
case SIMD_AVX2:
billow_noise->billow_func = &billow_noise_eval_3d_avx2;
break;
case SIMD_AVX:
billow_noise->billow_func = &billow_noise_eval_3d_avx;
break;
case SIMD_SSE4_1:
billow_noise->billow_func = &billow_noise_eval_3d_sse4_1;
break;
case SIMD_SSE2:
billow_noise->billow_func = &billow_noise_eval_3d_sse2;
break;
#else
case SIMD_NEON:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
#endif
default:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
}
}
static inline float *billow_noise_eval_1d(struct BillowNoise *billow_noise, size_t x_size) {
return billow_noise->billow_func(billow_noise, x_size, 1, 1);
}
static inline float *billow_noise_eval_2d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size) {
return billow_noise->billow_func(billow_noise, x_size, y_size, 1);
}
static inline float *billow_noise_eval_3d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
return billow_noise->billow_func(billow_noise, x_size, y_size, z_size);
}
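/* Usage sketch (illustration only; noise_free() is an assumed deallocator
 * matching noise_allocate() from noisecommon.h):
 *
 * struct BillowNoise bn;
 * billow_noise_init(&bn);
 * bn.parallel = true; // opt in to the OpenMP loops
 * float *set = billow_noise_eval_3d(&bn, 64, 64, 64);
 * float v = set[x + (y * 64) + (z * 64 * 64)]; // layout: x + y*X + z*X*Y
 * noise_free(set);
 */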
static inline float billow_noise_eval_3d_single(struct BillowNoise *billow_noise, float x_pos, float y_pos, float z_pos) {
float x = (billow_noise->position[0] + (x_pos * billow_noise->step)) * billow_noise->frequency;
float y = (billow_noise->position[1] + (y_pos * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_pos * billow_noise->step)) * billow_noise->frequency;
float value = 0.0;
float cur_persistence = 1.0;
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
float nx = make_int_32_range(x);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
float signal = gradient_coherent_noise_3d(nx, ny, nz, cur_seed, billow_noise->noise_quality);
signal = 2.0 * fabs(signal) - 1.0;
value += signal * cur_persistence;
x *= billow_noise->lacunarity;
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
value += 0.5;
return value;
}
static inline float *billow_noise_eval_3d_fallback(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
#ifdef CUSTOM_ALLOCATOR
float *noise_set = malloc(sizeof(float) * x_size * y_size * z_size);
#else
float *noise_set = noise_allocate(sizeof(float), sizeof(float) * x_size * y_size * z_size);
#endif
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
for (int z_dim = 0; z_dim < z_size; z_dim++) {
for (int y_dim = 0; y_dim < y_size; y_dim++) {
for (int x_dim = 0; x_dim < x_size; x_dim++) {
/* match the coordinate mapping used by the SIMD kernels and by
billow_noise_eval_3d_single(): (position + index*step) * frequency */
float x = (billow_noise->position[0] + (x_dim * billow_noise->step)) * billow_noise->frequency;
float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
float value = 0.0;
float cur_persistence = 1.0;
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
float nx = make_int_32_range(x);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
float signal = gradient_coherent_noise_3d(nx, ny, nz, cur_seed, billow_noise->noise_quality);
signal = 2.0 * fabs(signal) - 1.0;
value += signal * cur_persistence;
x *= billow_noise->lacunarity;
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
value += 0.5;
*(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size)))) = value;
}
}
}
return noise_set;
}
#ifdef ARCH_32_64
static inline float *billow_noise_eval_3d_sse2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
for (int z_dim = 0; z_dim < z_size; z_dim++) {
for (int y_dim = 0; y_dim < y_size; y_dim++) {
for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
__m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(billow_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(billow_noise->step))), _mm_set1_ps(billow_noise->frequency));
float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
__m128 value = _mm_set1_ps(0.0);
float cur_persistence = 1.0;
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
__m128 nx = make_int_32_range_sse2(x_vec);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
__m128 signal = gradient_coherent_noise_3d_sse2(nx, ny, nz, cur_seed, billow_noise->noise_quality);
signal = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(2.0), _mm_andnot_ps(_mm_set1_ps(-0.0), signal)), _mm_set1_ps(1.0));
value = _mm_add_ps(value, _mm_mul_ps(signal, _mm_set1_ps(cur_persistence)));
x_vec = _mm_mul_ps(x_vec, _mm_set1_ps(billow_noise->lacunarity));
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
value = _mm_add_ps(value, _mm_set1_ps(0.5));
_mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
}
}
}
return noise_set;
}
static inline float *billow_noise_eval_3d_sse4_1(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
for (int z_dim = 0; z_dim < z_size; z_dim++) {
for (int y_dim = 0; y_dim < y_size; y_dim++) {
for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
__m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(billow_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(billow_noise->step))), _mm_set1_ps(billow_noise->frequency));
float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
__m128 value = _mm_set1_ps(0.0);
float cur_persistence = 1.0;
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
__m128 nx = make_int_32_range_sse2(x_vec);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
__m128 signal = gradient_coherent_noise_3d_sse4_1(nx, ny, nz, cur_seed, billow_noise->noise_quality);
signal = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(2.0), _mm_andnot_ps(_mm_set1_ps(-0.0), signal)), _mm_set1_ps(1.0));
value = _mm_add_ps(value, _mm_mul_ps(signal, _mm_set1_ps(cur_persistence)));
x_vec = _mm_mul_ps(x_vec, _mm_set1_ps(billow_noise->lacunarity));
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
value = _mm_add_ps(value, _mm_set1_ps(0.5));
_mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
}
}
}
return noise_set;
}
static inline float *billow_noise_eval_3d_avx(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
for (int z_dim = 0; z_dim < z_size; z_dim++) {
for (int y_dim = 0; y_dim < y_size; y_dim++) {
for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
__m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(billow_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(billow_noise->step))), _mm256_set1_ps(billow_noise->frequency));
float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
__m256 value = _mm256_set1_ps(0.0);
float cur_persistence = 1.0;
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
__m256 nx = make_int_32_range_avx(x_vec);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
__m256 signal = gradient_coherent_noise_3d_avx(nx, ny, nz, cur_seed, billow_noise->noise_quality);
signal = _mm256_sub_ps(_mm256_mul_ps(_mm256_set1_ps(2.0), _mm256_andnot_ps(_mm256_set1_ps(-0.0), signal)), _mm256_set1_ps(1.0));
value = _mm256_add_ps(value, _mm256_mul_ps(signal, _mm256_set1_ps(cur_persistence)));
x_vec = _mm256_mul_ps(x_vec, _mm256_set1_ps(billow_noise->lacunarity));
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
value = _mm256_add_ps(value, _mm256_set1_ps(0.5));
_mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
}
}
}
return noise_set;
}
static inline float *billow_noise_eval_3d_avx2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
for (int z_dim = 0; z_dim < z_size; z_dim++) {
for (int y_dim = 0; y_dim < y_size; y_dim++) {
for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
__m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(billow_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(billow_noise->step))), _mm256_set1_ps(billow_noise->frequency));
float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
__m256 value = _mm256_set1_ps(0.0);
float cur_persistence = 1.0;
for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
__m256 nx = make_int_32_range_avx(x_vec);
float ny = make_int_32_range(y);
float nz = make_int_32_range(z);
int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
__m256 signal = gradient_coherent_noise_3d_avx2(nx, ny, nz, cur_seed, billow_noise->noise_quality);
signal = _mm256_sub_ps(_mm256_mul_ps(_mm256_set1_ps(2.0), _mm256_andnot_ps(_mm256_set1_ps(-0.0), signal)), _mm256_set1_ps(1.0));
value = _mm256_add_ps(value, _mm256_mul_ps(signal, _mm256_set1_ps(cur_persistence)));
x_vec = _mm256_mul_ps(x_vec, _mm256_set1_ps(billow_noise->lacunarity));
y *= billow_noise->lacunarity;
z *= billow_noise->lacunarity;
cur_persistence *= billow_noise->persistence;
}
value = _mm256_add_ps(value, _mm256_set1_ps(0.5));
_mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
}
}
}
return noise_set;
}
#endif
#endif // BILLOW_NOISE_H
|
GB_binop__rdiv_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint32)
// A*D function (colscale): GB (_AxD__rdiv_uint32)
// D*A function (rowscale): GB (_DxB__rdiv_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint32)
// C=scalar+B GB (_bind1st__rdiv_uint32)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint32)
// C=A+scalar GB (_bind2nd__rdiv_uint32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 32)
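// For illustration (added note): RDIV is the flipped division z = y / x, so
// with aij = 4 and bij = 20 the result is cij = 20 / 4 = 5. GB_IDIV_UNSIGNED
// also defines unsigned division by zero, returning the largest value of the
// type (UINT32_MAX here), per SuiteSparse's integer-division convention.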
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT32 || GxB_NO_RDIV_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 32) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 32) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB054-inneronly2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example with loop-carried data dependence at the outer level loop.
The inner level loop can be parallelized.
*/
#include <omp.h>
#include <stdio.h>
int main()
{
int i;
int j;
int n = 100;
int m = 100;
double b[n][m];
#pragma omp parallel for private (i,j)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= n - 1; j += 1) {
b[i][j] = ((double )(i * j));
}
}
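/* The loop nest below carries a dependence across i: b[i][j] reads
b[i-1][j-1], written in the previous outer iteration, so only the inner
j loop is parallelized. */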
for (i = 1; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 1; j <= m - 1; j += 1) {
b[i][j] = b[i - 1][j - 1];
}
}
for (i = 0; i <= n - 1; i += 1) {
for (j = 0; j <= n - 1; j += 1) {
printf("%lf\n",b[i][j]);
}
}
return 0;
}
|
logit_loss_delta.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef ZDIFACTO_LOSS_LOGIT_LOSS_DELTA_H_
#define ZDIFACTO_LOSS_LOGIT_LOSS_DELTA_H_
#include <cmath>
#include <vector>
#include "zdifacto/loss.h"
#include "zdifacto/sarray.h"
#include "common/range.h"
#include "common/spmv.h"
#include "dmlc/omp.h"
#include "dmlc/logging.h"
namespace zdifacto {
/**
* \brief parameters for \ref LogitLossDelta
*/
struct LogitLossDeltaParam : public dmlc::Parameter<LogitLossDeltaParam> {
/**
* \brief whether or not to compute the Hessian matrix
* 0 : no
* 1 : diagonal Hessian matrix
* 2 : the upper bound of the diagonal Hessian
*/
int compute_hession;
DMLC_DECLARE_PARAMETER(LogitLossDeltaParam) {
DMLC_DECLARE_FIELD(compute_hession).set_range(0, 2).set_default(1);
}
};
/**
* \brief the logistic loss, specialized for block coordinate descent
*
* :math:`\ell(x,y,w) = log(1 + exp(- y <w, x>))`
*
* \ref LogitLossDelta is fed with X' (the transpose of X, in row-major
* format) and delta w each time, and is able to compute the second-order
* gradients.
*
* Note: one can use \ref LogitLoss for the ordinary logistic loss, namely given
* X and w each time.
*/
class LogitLossDelta : public Loss {
public:
/** \brief constructor */
LogitLossDelta() { }
/** \brief deconstructor */
virtual ~LogitLossDelta() { }
KWArgs Init(const KWArgs& kwargs) override {
return param_.InitAllowUnknown(kwargs);
}
/**
* @param data X', the transpose of X
*
* pred += X * delta_w
*
* @param param input parameters
* - param[0], real_t vector, the delta weight, namely new_w - old_w
* - param[1], optional int vector, the weight positions
* @param pred predict output, should be pre-allocated
*/
void Predict(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* pred) override {
int psize = param.size();
CHECK_GE(psize, 1); CHECK_LE(psize, 2);
SArray<real_t> delta_w(param[0]);
SArray<int> w_pos = psize == 2 ? SArray<int>(param[1]) : SArray<int>();
SpMV::TransTimes(data, delta_w, pred, nthreads_, w_pos, {});
}
/**
* \brief compute the gradients
*
* tau = 1 / (1 + exp(y .* pred))
* first order grad
* f'(w) = - X' * (tau .* y)
* diagonal second-order grad:
* f''(w) = (X.*X)' * (tau .* (1-tau))
*
* @param data X', the transpose of X
* @param param input parameters
* - param[0], real_t vector, the predict output
* - param[1], optional int vector, the gradient positions
* - param[2], optional real_t vector, the delta needed if
* compute_hession == 2
* @param grad gradient output, should be preallocated
*/
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* grad) override {
int psize = param.size();
CHECK_GE(psize, 1);
CHECK_LE(psize, 3);
if (grad->empty()) return;
// p = ...
SArray<real_t> p; p.CopyFrom(SArray<real_t>(param[0]));
CHECK_NOTNULL(data.label);
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
real_t y = data.label[i] > 0 ? 1 : -1;
p[i] = - y / (1 + std::exp(y * p[i]));
}
// grad = ...
SArray<int> grad_pos = psize > 1 ? SArray<int>(param[1]) : SArray<int>();
if (param_.compute_hession != 0) CHECK(!grad_pos.empty());
SpMV::Times(data, p, grad, nthreads_, {}, grad_pos);
if (param_.compute_hession == 0) return;
// h = ...
SArray<int> h_pos; h_pos.CopyFrom(grad_pos);
for (size_t i = 0; i < h_pos.size(); ++i) {
if (h_pos[i] >= 0) ++h_pos[i];
}
// compute X .* X
dmlc::RowBlock<unsigned> XX = data;
SArray<dmlc::real_t> xx_value;
if (data.value) {
xx_value.resize(data.offset[data.size]);
for (size_t i = data.offset[0]; i < data.offset[data.size]; ++i) {
xx_value[i] = data.value[i] * data.value[i];
}
XX.value = xx_value.data();
}
// p = tau * (1 - tau)
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
real_t y = data.label[i] > 0 ? 1 : -1;
p[i] = - p[i] * (y + p[i]);
}
if (param_.compute_hession == 1) {
SpMV::Times(XX, p, grad, nthreads_, {}, h_pos);
} else if (param_.compute_hession == 2) {
LOG(FATAL) << "...";
CHECK_EQ(psize, 3);
SArray<real_t> delta(param[2]);
// TODO(mli)
} else {
LOG(FATAL) << "...";
}
}
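// Derivation sketch (added for illustration, not in the original source):
// with s = <w,x> and tau = 1/(1 + exp(y*s)), the loss log(1 + exp(-y*s))
// has d/ds = -y*tau, hence f'(w) = -X' * (tau .* y); differentiating again
// gives d2/ds2 = tau*(1-tau) (using y*y = 1), so the diagonal Hessian is
// (X.*X)' * (tau .* (1-tau)), matching the two SpMV::Times calls above.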
private:
LogitLossDeltaParam param_;
};
} // namespace zdifacto
#endif // ZDIFACTO_LOSS_LOGIT_LOSS_DELTA_H_
|
Fig_4.10_crit.c | // sample compile command: "gcc -fopenmp -c Fig_4.10_crit.c" to generate *.o object file
#include <stdio.h>
#include <omp.h>
/* big_job() and consume() are assumed to be defined in another translation
unit; declare them so this file compiles cleanly with "gcc -fopenmp -c". */
extern float big_job(int i);
extern float consume(float B);
int main()
{
float res = 0.0;
int niters = 1000;
#pragma omp parallel
{
float B; int i, id, nthrds;
id = omp_get_thread_num();
nthrds = omp_get_num_threads();
for (i = id; i < niters; i += nthrds) {
B = big_job(i);
#pragma omp critical
res += consume(B);
}
} // end of parallel region
return 0;
}
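/* Note (added for illustration): the critical region serializes both the
consume() call and the update of res; if consume() is expensive, computing
it into a local variable first and guarding only "res += ..." with the
critical construct shortens the serialized section. */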
|
ej4.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
#define TAM 1024
void rellenarArray(float *V){
for(int i=0;i<TAM;++i)
*(V+i)=100.0f;
}
int main() {
int number;
double start;
float *x = (float *)malloc(sizeof(float)*TAM);
float *y = (float *)malloc(sizeof(float)*TAM);
float alpha=10;
#pragma omp parallel num_threads(2)
{
#pragma omp sections
{
#pragma omp section
rellenarArray(x);
#pragma omp section
rellenarArray(y);
}
}
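// The two sections above run concurrently on the two-thread team, filling
// x and y in parallel before the SAXPY-style timing loops below.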
// 2 threads
number=2;
start = omp_get_wtime();
// PROGRAM
#pragma omp parallel for num_threads(number)
for(int i=0; i<TAM; ++i)
*(y+i)=(*(x+i)*alpha)+*(y+i);
// END PROGRAM
printf("\n-------------------------------------------\nExecution time of the program with %i threads: %lfs\n-------------------------------------------\n",number,omp_get_wtime()-start);
// 4 threads
number=4;
start = omp_get_wtime();
// PROGRAM
#pragma omp parallel for num_threads(number)
for(int i=0; i<TAM; ++i)
*(y+i)=(*(x+i)*alpha)+*(y+i);
// END PROGRAM
printf("\n-------------------------------------------\nExecution time of the program with %i threads: %lfs\n-------------------------------------------\n",number,omp_get_wtime()-start);
// 6 threads
number=6;
start = omp_get_wtime();
// PROGRAM
#pragma omp parallel for num_threads(number)
for(int i=0; i<TAM; ++i)
*(y+i)=(*(x+i)*alpha)+*(y+i);
// END PROGRAM
printf("\n-------------------------------------------\nExecution time of the program with %i threads: %lfs\n-------------------------------------------\n",number,omp_get_wtime()-start);
// 8 threads
number=8;
start = omp_get_wtime();
// PROGRAM
#pragma omp parallel for num_threads(number)
for(int i=0; i<TAM; ++i)
*(y+i)=(*(x+i)*alpha)+*(y+i);
// END PROGRAM
printf("\n-------------------------------------------\nExecution time of the program with %i threads: %lfs\n-------------------------------------------\n",number,omp_get_wtime()-start);
free(x);
free(y);
return 0;
}
|
dropout_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include <random>
#include <string>
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
namespace paddle {
namespace operators {
// aligned vector generates vectorized load/store on CUDA
template <typename T, int Size>
struct alignas(sizeof(T) * Size) AlignedVector {
T val[Size];
};
template <typename T>
inline int VectorizedSize(const T* pointer) {
uint64_t address = reinterpret_cast<uint64_t>(pointer);
constexpr int vec4 = std::alignment_of<AlignedVector<T, 4>>::value; // NOLINT
if (address % vec4 == 0) {
return 4;
}
return 1;
}
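// Illustration (added note): pointers returned by cudaMalloc are at least
// 256-byte aligned, so for such a float* the address is divisible by 16 and
// VectorizedSize returns 4, enabling the 4-wide loads/stores in the kernel
// below; an arbitrarily offset pointer falls back to 1.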
#if defined(__NVCC__) || defined(__HIPCC__)
template <typename T, typename MaskType, int VecSize>
__global__ void DropoutGradCUDAKernel(const T* dout, const MaskType* mask,
const T factor, const int64_t size,
T* dx) {
int64_t idx = blockDim.x * blockIdx.x + threadIdx.x;
using LoadT = AlignedVector<T, VecSize>;
using MaskLoadT = AlignedVector<MaskType, VecSize>;
for (int i = idx * VecSize; i < size; i += blockDim.x * gridDim.x * VecSize) {
T dout_vec[VecSize];
LoadT* dout_value = reinterpret_cast<LoadT*>(&dout_vec);
*dout_value = *reinterpret_cast<const LoadT*>(&dout[i]);
MaskType mask_vec[VecSize];
MaskLoadT* mask_value = reinterpret_cast<MaskLoadT*>(&mask_vec);
*mask_value = *reinterpret_cast<const MaskLoadT*>(&mask[i]);
T dx_vec[VecSize];
#pragma unroll
for (int ii = 0; ii < VecSize; ii++) {
dx_vec[ii] = dout_vec[ii] * static_cast<T>(mask_vec[ii]) * factor;
}
*(reinterpret_cast<LoadT*>(&dx[i])) = *reinterpret_cast<LoadT*>(&dx_vec[0]);
}
}
#endif
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* seed =
context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
auto* y = context.Output<Tensor>("Out");
const auto* x_data = x->data<T>();
auto* y_data = y->mutable_data<T>(context.GetPlace());
float dropout_prob = context.Attr<float>("dropout_prob");
auto& dropout_implementation =
context.Attr<std::string>("dropout_implementation");
bool upscale_in_train = (dropout_implementation == "upscale_in_train");
if (!context.Attr<bool>("is_test")) {
auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
size_t size = framework::product(mask->dims());
// Special case when dropout_prob is 1.0
if (dropout_prob == 1.0f) {
std::memset(y_data, 0, size * sizeof(*y_data)); // NOLINT
std::memset(mask_data, 0, size * sizeof(*mask_data)); // NOLINT
return;
}
// std::minstd_rand engine;
// NOTE: fixed seed should only be used in unittest or for debug.
// Guarantee to use random seed in training.
int seed_data = 0;
if (seed) {
seed_data = *(seed->data<int>());
} else {
seed_data =
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
}
auto engine = framework::GetCPURandomEngine(seed_data);
std::uniform_real_distribution<float> dist(0, 1);
for (size_t i = 0; i < size; ++i) {
if (dist(*engine) < dropout_prob) {
mask_data[i] = 0;
y_data[i] = 0;
} else {
mask_data[i] = 1;
if (upscale_in_train) {
y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob);
} else {
y_data[i] = x_data[i];
}
}
}
} else {
if (upscale_in_train) {
const auto* X_data = x->data<T>();
auto* Y_data = y->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < x->numel(); i++) {
Y_data[i] = X_data[i];
}
} else {
auto X = EigenMatrix<T>::Reshape(*x, 1);
auto Y = EigenMatrix<T>::Reshape(*y, 1);
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
}
}
}
};
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
auto* mask = context.Input<Tensor>("Mask");
grad_x->mutable_data<T>(context.GetPlace());
auto size = grad_x->numel();
auto dX = EigenVector<T>::Flatten(*grad_x);
auto dY = EigenVector<T>::Flatten(*grad_y);
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto& dropout_implementation =
context.Attr<std::string>("dropout_implementation");
if (context.Attr<bool>("is_test") == true) {
if (dropout_implementation == "upscale_in_train") {
dX.device(place) = static_cast<T>(1) * dY;
} else {
float dropout_prob = context.Attr<float>("dropout_prob");
dX.device(place) = dY * static_cast<T>(1.0f - dropout_prob);
}
} else {
auto M = EigenVector<uint8_t>::Flatten(*mask);
if (dropout_implementation == "upscale_in_train") {
float dropout_prob = context.Attr<float>("dropout_prob");
if (dropout_prob == 1.0f) {
dX.device(place) = static_cast<T>(0) * dY;
} else {
int vec_size = VectorizedSize<T>(grad_y->data<T>());
if (platform::is_gpu_place(context.GetPlace()) && vec_size == 4 &&
size % 4 == 0) {
#if defined(__NVCC__) || defined(__HIPCC__)
auto factor = static_cast<T>(1.0f / (1.0f - dropout_prob));
auto stream = context.cuda_device_context().stream();
platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(
context.cuda_device_context(), size);
DropoutGradCUDAKernel<T, uint8_t, 4><<<
config.block_per_grid, config.thread_per_block, 0, stream>>>(
grad_y->data<T>(), mask->data<uint8_t>(), factor, size,
grad_x->data<T>());
#endif
} else {
dX.device(place) =
dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob);
}
}
} else {
dX.device(place) = dY * M.cast<T>();
}
}
}
};
} // namespace operators
} // namespace paddle
|
Example_array_sections.3.c | /*
* @@name: array_sections.3c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
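/* Added note: the array-section syntax is [lower-bound : length], so
 * map(A[0:4]) maps elements A[0]..A[3] and map(p[7:20]) maps the twenty
 * elements A[7]..A[26] -- a disjoint region of the same array. */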
void foo ()
{
int A[30], *p;
#pragma omp target data map( A[0:4] )
{
p = &A[0];
#pragma omp target map( p[7:20] )
{
A[2] = 0;
p[8] = 0;
}
}
}
|
core.c | /* Generated by Cython 0.29.24 */
/* BEGIN: Cython Metadata
{
"distutils": {
"name": "monotonic_align.core",
"sources": [
"/home/hansenm/opt/vits-train/vits_train/monotonic_align/core.pyx"
]
},
"module_name": "monotonic_align.core"
}
END: Cython Metadata */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_24"
#define CYTHON_HEX_VERSION 0x001D18F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
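/* The division above is a compile-time assertion: if the SIZEOF_VOID_P
   reported by Python's build configuration ever disagreed with sizeof(void*)
   as seen by this compiler, the divisor would be 0 and the translation unit
   would fail to compile, surfacing the ABI mismatch early instead of at
   runtime. */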
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
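/* __Pyx_void_to_None uses the comma operator to evaluate (and discard) a
   void expression, then yields a new reference to Py_None -- the C analogue
   of a Python function body that falls off the end without an explicit
   return value. */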
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
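/* __Pyx_PyCode_New papers over a CPython API change: the 3.8 pre-releases in
   the range [0x030800A4, 0x030800B2) gave PyCode_New an extra leading
   posonlyargcount parameter (passed as 0 here), while earlier and later
   releases keep the original signature. On Python 2 the kwonly count is
   folded into the positional count instead. */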
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
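/* The block above backports the PEP 539 thread-specific storage (TSS) API
   onto the pre-3.7 PyThread_*_key_* interface, so the rest of the file can
   call PyThread_tss_* unconditionally. On those older Pythons Py_tss_t
   degrades to a plain int key and Py_tss_NEEDS_INIT to 0. */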
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
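/* CPython's minimum dict table already has 8 slots, so for n <= 8 the plain
   PyDict_New() is used; the private _PyDict_NewPresized() is reserved for
   dicts expected to grow beyond that, avoiding rehashing during
   construction. */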
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
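/* When the C library does not define NAN, __PYX_NAN() manufactures one by
   setting every bit of a float: an all-ones IEEE-754 pattern has an all-ones
   exponent and a non-zero mantissa, which is by definition a (quiet) NaN. */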
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
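/* __PYX_MARK_ERR_POS records the Cython source position (file index and
   line) plus the C line at the point of failure; __PYX_ERR then jumps to the
   function's error label, e.g. __PYX_ERR(0, 42, __pyx_L1_error). The
   recorded position is what __Pyx_AddTraceback (declared further below)
   turns into a Python-level traceback entry. */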
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__monotonic_align__core
#define __PYX_HAVE_API__monotonic_align__core
/* Early includes */
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
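/* __Pyx_fits_Py_ssize_t answers, largely at compile time, whether a value of
   some integer type is representable as a Py_ssize_t: a strictly smaller
   type always fits, a larger type needs both bounds checked, and an
   equal-sized type only needs the upper bound checked when it is unsigned. */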
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
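/* Casting both operands to size_t folds the two-sided bounds check
   0 <= i < limit into a single unsigned comparison: a negative i wraps to a
   value far above any valid limit, so it fails the same test as i >= limit. */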
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
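/* A strlen() analogue for Py_UNICODE buffers: the post-incremented u_end
   stops one element past the terminating 0, hence the -1 in the result. */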
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char *__pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"vits_train/monotonic_align/core.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
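/* __Pyx_memviewslice is the C-level view of a typed memoryview: a pointer to
   the owning memoryview object plus Py_buffer-style per-dimension shape,
   strides and suboffsets arrays, with the dimension count capped at 8 in
   this build. */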
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
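/* Memoryview slices are reference-counted through these acquisition
   counters. With suitable GNU builtins they are adjusted lock-free via
   __sync_fetch_and_add/sub; the MSVC and Intel interlocked branches above
   are compiled out ("&& 0"), and on every other platform CYTHON_ATOMICS is
   forced to 0 so the *_locked fallbacks take the memoryview's lock
   instead. */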
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each;
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each {
int __pyx_n;
float max_neg_val;
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
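/* Refnanny is Cython's debug-build reference-count checker. When enabled,
   every INCREF/DECREF/GOTREF/GIVEREF is routed through a context object
   imported from the Cython runtime so leaks can be attributed to a source
   line; in normal builds (CYTHON_REFNANNY == 0) the macros collapse to plain
   Py_INCREF/Py_DECREF or to nothing. */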
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8 /* the "%(BUF_MAX_NDIMS)d" template placeholder was left unexpanded in the generated file; 8 matches the 8-element shape/strides arrays above */
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
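/* __Pxy_PyFrame_Initialize_Offsets computes where a frame's f_localsplus
   array lives from PyFrame_Type.tp_basicsize rather than offsetof, after a
   build-time assertion (the negative-array-size trick in
   __Pyx_BUILD_ASSERT_EXPR) that f_localsplus really is the trailing member
   of PyFrameObject. This keeps the fast-call path tolerant of layout
   differences between CPython builds. */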
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
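/* These wrappers pin PyUnicode_DecodeUTF16's by-reference byteorder
   argument: 0 selects native order with BOM detection, -1 forces little
   endian, and +1 forces big endian, matching the CPython codec API. */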
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
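/* PEP 509 gives every dict a ma_version_tag that changes on mutation.
   __PYX_PY_DICT_LOOKUP_IF_MODIFIED exploits this to cache a lookup result in
   function-local statics and re-evaluate the (LOOKUP) expression only when
   the dict's version has moved on; without dict versions it degrades to an
   unconditional lookup. */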
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
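/* Two details of the append fast path above are worth noting: the guards are
   combined with a bitwise & rather than &&, trading short-circuiting for
   branch-free evaluation, and the len > allocated/2 test skips the fast path
   for sparsely filled lists, presumably so that falling back to
   PyList_Append still gives CPython's resize logic a chance to shrink the
   over-allocation. */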
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'monotonic_align.core' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/
static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "monotonic_align.core"
extern int __pyx_module_is_main_monotonic_align__core;
int __pyx_module_is_main_monotonic_align__core = 0;
/* Implementation of 'monotonic_align.core' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_t_xs[] = "t_xs";
static const char __pyx_k_t_ys[] = "t_ys";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_paths[] = "paths";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_values[] = "values";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
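/* Interned-string table: the __pyx_k_* byte arrays above are converted into
 * the cached PyObject* names and messages declared below by
 * __Pyx_InitStrings() at module import, so attribute lookups and error
 * messages avoid rebuilding the same strings repeatedly. */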
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_paths;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_t_xs;
static PyObject *__pyx_n_s_t_ys;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_values;
static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static float __pyx_k_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__16;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_codeobj__26;
/* Late includes */
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
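/*
 * Generated body of maximum_path_each(): a dynamic program over one
 * (t_y x t_x) value matrix. The forward pass accumulates, for every cell on
 * the monotonic band, value[y, x] += max(value[y-1, x], value[y-1, x-1]),
 * with max_neg_val standing in for predecessors outside the band; the
 * backward pass then traces the best monotonic path into `path`. A minimal
 * sketch of the same recurrence in plain C (illustration only, not part of
 * the generated module; `value` is assumed row-major with leading
 * dimension t_x):
 *
 *   for (int y = 0; y < t_y; y++) {
 *     int x_lo = (t_x + y - t_y) > 0 ? (t_x + y - t_y) : 0;
 *     int x_hi = (y + 1) < t_x ? (y + 1) : t_x;
 *     for (int x = x_lo; x < x_hi; x++) {
 *       // x == y: the cell above lies outside the band, treat as -inf
 *       float v_cur  = (x == y) ? max_neg_val : value[(y-1)*t_x + x];
 *       float v_prev = (x == 0) ? (y == 0 ? 0.f : max_neg_val)
 *                               : value[(y-1)*t_x + (x-1)];
 *       value[y*t_x + x] += v_prev > v_cur ? v_prev : v_cur;
 *     }
 *   }
 */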
static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) {
float __pyx_v_max_neg_val = __pyx_k_;
int __pyx_v_x;
int __pyx_v_y;
float __pyx_v_v_prev;
float __pyx_v_v_cur;
int __pyx_v_index;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
long __pyx_t_4;
int __pyx_t_5;
long __pyx_t_6;
long __pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
float __pyx_t_11;
float __pyx_t_12;
float __pyx_t_13;
int __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_max_neg_val = __pyx_optional_args->max_neg_val;
}
}
/* "monotonic_align/core.pyx":13
* cdef float v_cur
* cdef float tmp
* cdef int index = t_x - 1 # <<<<<<<<<<<<<<
*
* for y in range(t_y):
*/
__pyx_v_index = (__pyx_v_t_x - 1);
/* "monotonic_align/core.pyx":15
* cdef int index = t_x - 1
*
* for y in range(t_y): # <<<<<<<<<<<<<<
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y:
*/
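/* Forward pass. The inlined max()/min() below clamp x to the band
 * [max(0, t_x + y - t_y), min(t_x, y + 1)): x <= y because the path starts
 * at (0, 0) and advances x by at most one per row, and x >= t_x + y - t_y
 * because enough rows must remain to still reach column t_x - 1. */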
__pyx_t_1 = __pyx_v_t_y;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_y = __pyx_t_3;
/* "monotonic_align/core.pyx":16
*
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<<
* if x == y:
* v_cur = max_neg_val
*/
__pyx_t_4 = (__pyx_v_y + 1);
__pyx_t_5 = __pyx_v_t_x;
if (((__pyx_t_4 < __pyx_t_5) != 0)) {
__pyx_t_6 = __pyx_t_4;
} else {
__pyx_t_6 = __pyx_t_5;
}
__pyx_t_4 = __pyx_t_6;
__pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y);
__pyx_t_6 = 0;
if (((__pyx_t_5 > __pyx_t_6) != 0)) {
__pyx_t_7 = __pyx_t_5;
} else {
__pyx_t_7 = __pyx_t_6;
}
__pyx_t_6 = __pyx_t_4;
for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) {
__pyx_v_x = __pyx_t_5;
/* "monotonic_align/core.pyx":17
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y: # <<<<<<<<<<<<<<
* v_cur = max_neg_val
* else:
*/
__pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":18
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y:
* v_cur = max_neg_val # <<<<<<<<<<<<<<
* else:
* v_cur = value[y-1, x]
*/
__pyx_v_v_cur = __pyx_v_max_neg_val;
/* "monotonic_align/core.pyx":17
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y: # <<<<<<<<<<<<<<
* v_cur = max_neg_val
* else:
*/
goto __pyx_L7;
}
/* "monotonic_align/core.pyx":20
* v_cur = max_neg_val
* else:
* v_cur = value[y-1, x] # <<<<<<<<<<<<<<
* if x == 0:
* if y == 0:
*/
/*else*/ {
__pyx_t_9 = (__pyx_v_y - 1);
__pyx_t_10 = __pyx_v_x;
__pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )));
}
__pyx_L7:;
/* "monotonic_align/core.pyx":21
* else:
* v_cur = value[y-1, x]
* if x == 0: # <<<<<<<<<<<<<<
* if y == 0:
* v_prev = 0.
*/
__pyx_t_8 = ((__pyx_v_x == 0) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":22
* v_cur = value[y-1, x]
* if x == 0:
* if y == 0: # <<<<<<<<<<<<<<
* v_prev = 0.
* else:
*/
__pyx_t_8 = ((__pyx_v_y == 0) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":23
* if x == 0:
* if y == 0:
* v_prev = 0. # <<<<<<<<<<<<<<
* else:
* v_prev = max_neg_val
*/
__pyx_v_v_prev = 0.;
/* "monotonic_align/core.pyx":22
* v_cur = value[y-1, x]
* if x == 0:
* if y == 0: # <<<<<<<<<<<<<<
* v_prev = 0.
* else:
*/
goto __pyx_L9;
}
/* "monotonic_align/core.pyx":25
* v_prev = 0.
* else:
* v_prev = max_neg_val # <<<<<<<<<<<<<<
* else:
* v_prev = value[y-1, x-1]
*/
/*else*/ {
__pyx_v_v_prev = __pyx_v_max_neg_val;
}
__pyx_L9:;
/* "monotonic_align/core.pyx":21
* else:
* v_cur = value[y-1, x]
* if x == 0: # <<<<<<<<<<<<<<
* if y == 0:
* v_prev = 0.
*/
goto __pyx_L8;
}
/* "monotonic_align/core.pyx":27
* v_prev = max_neg_val
* else:
* v_prev = value[y-1, x-1] # <<<<<<<<<<<<<<
* value[y, x] += max(v_prev, v_cur)
*
*/
/*else*/ {
__pyx_t_10 = (__pyx_v_y - 1);
__pyx_t_9 = (__pyx_v_x - 1);
__pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) )));
}
__pyx_L8:;
/* "monotonic_align/core.pyx":28
* else:
* v_prev = value[y-1, x-1]
* value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<<
*
* for y in range(t_y - 1, -1, -1):
*/
__pyx_t_11 = __pyx_v_v_cur;
__pyx_t_12 = __pyx_v_v_prev;
if (((__pyx_t_11 > __pyx_t_12) != 0)) {
__pyx_t_13 = __pyx_t_11;
} else {
__pyx_t_13 = __pyx_t_12;
}
__pyx_t_9 = __pyx_v_y;
__pyx_t_10 = __pyx_v_x;
*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13;
}
}
/* "monotonic_align/core.pyx":30
* value[y, x] += max(v_prev, v_cur)
*
* for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<<
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
*/
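/* Backward pass: starting at index = t_x - 1, mark path[y, index] = 1 for
 * every row from t_y - 1 down to 0, and move one column left whenever the
 * move is forced (index == y, staying would leave the band) or the diagonal
 * predecessor value[y-1, index-1] strictly beats value[y-1, index]. */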
for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_y = __pyx_t_1;
/* "monotonic_align/core.pyx":31
*
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1 # <<<<<<<<<<<<<<
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
* index = index - 1
*/
__pyx_t_10 = __pyx_v_y;
__pyx_t_9 = __pyx_v_index;
*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1;
/* "monotonic_align/core.pyx":32
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<<
* index = index - 1
*
*/
__pyx_t_14 = ((__pyx_v_index != 0) != 0);
if (__pyx_t_14) {
} else {
__pyx_t_8 = __pyx_t_14;
goto __pyx_L13_bool_binop_done;
}
__pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0);
if (!__pyx_t_14) {
} else {
__pyx_t_8 = __pyx_t_14;
goto __pyx_L13_bool_binop_done;
}
__pyx_t_9 = (__pyx_v_y - 1);
__pyx_t_10 = __pyx_v_index;
__pyx_t_15 = (__pyx_v_y - 1);
__pyx_t_16 = (__pyx_v_index - 1);
__pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0);
__pyx_t_8 = __pyx_t_14;
__pyx_L13_bool_binop_done:;
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":33
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
* index = index - 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_index = (__pyx_v_index - 1);
/* "monotonic_align/core.pyx":32
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<<
* index = index - 1
*
*/
}
}
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
/* function exit code */
}
/* "monotonic_align/core.pyx":38
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<<
* cdef int b = paths.shape[0]
* cdef int i
*/
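/* maximum_path_c() is the batched entry point: it runs maximum_path_each()
 * independently on each batch item, using the per-item lengths in
 * t_ys/t_xs so the padded regions of `paths` and `values` are never
 * touched. */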
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) {
CYTHON_UNUSED int __pyx_v_b;
int __pyx_v_i;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
__Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
/* "monotonic_align/core.pyx":39
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
* cdef int b = paths.shape[0] # <<<<<<<<<<<<<<
* cdef int i
* for i in prange(b, nogil=True):
*/
__pyx_v_b = (__pyx_v_paths.shape[0]);
/* "monotonic_align/core.pyx":41
* cdef int b = paths.shape[0]
* cdef int i
* for i in prange(b, nogil=True): # <<<<<<<<<<<<<<
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
*/
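/* Expansion of `prange(b, nogil=True)`: the GIL is released around the whole
 * loop (Py_UNBLOCK_THREADS) and, when the module is compiled with OpenMP,
 * iterations are distributed across threads by the #pragma omp directives
 * below. The `if ((1 == 0)) abort();` further down appears to be the
 * generated zero-step guard for prange, constant-folded into dead code
 * because the step here is the literal 1. */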
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_b;
if ((1 == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_2);
/* "monotonic_align/core.pyx":42
* cdef int i
* for i in prange(b, nogil=True):
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<<
*/
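/* Hand-inlined equivalents of the slices paths[i] and values[i]: the 3-D
 * memoryviews are narrowed to 2-D views (__pyx_t_4, __pyx_t_5) by offsetting
 * the data pointer by i * strides[0] and copying the trailing shape/strides,
 * so no Python objects are created inside the nogil/OpenMP region; only the
 * atomic acquisition counts are bumped via __PYX_INC_MEMVIEW. */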
__pyx_t_4.data = __pyx_v_paths.data;
__pyx_t_4.memview = __pyx_v_paths.memview;
__PYX_INC_MEMVIEW(&__pyx_t_4, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0];
__pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_4.shape[0] = __pyx_v_paths.shape[1];
__pyx_t_4.strides[0] = __pyx_v_paths.strides[1];
__pyx_t_4.suboffsets[0] = -1;
__pyx_t_4.shape[1] = __pyx_v_paths.shape[2];
__pyx_t_4.strides[1] = __pyx_v_paths.strides[2];
__pyx_t_4.suboffsets[1] = -1;
__pyx_t_5.data = __pyx_v_values.data;
__pyx_t_5.memview = __pyx_v_values.memview;
__PYX_INC_MEMVIEW(&__pyx_t_5, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0];
__pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_5.shape[0] = __pyx_v_values.shape[1];
__pyx_t_5.strides[0] = __pyx_v_values.strides[1];
__pyx_t_5.suboffsets[0] = -1;
__pyx_t_5.shape[1] = __pyx_v_values.shape[2];
__pyx_t_5.strides[1] = __pyx_v_values.strides[2];
__pyx_t_5.suboffsets[1] = -1;
__pyx_t_6 = __pyx_v_i;
__pyx_t_7 = __pyx_v_i;
__pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL);
__PYX_XDEC_MEMVIEW(&__pyx_t_4, 0);
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 0);
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "monotonic_align/core.pyx":41
* cdef int b = paths.shape[0]
* cdef int i
* for i in prange(b, nogil=True): # <<<<<<<<<<<<<<
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "monotonic_align/core.pyx":38
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<<
* cdef int b = paths.shape[0]
* cdef int i
*/
/* function exit code */
}
/* Python wrapper */
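/*
 * def-style wrapper for the cpdef function: it unpacks the four arguments
 * (positionally or by keyword), converts each to a typed, C-contiguous
 * memoryview slice, and delegates to the C implementation above. Note that
 * all four conversions request PyBUF_WRITABLE, so even t_ys/t_xs must be
 * writable buffers. A hedged usage sketch from Python (illustration only;
 * array names, dtypes, and shapes are assumptions, not part of this module):
 *
 *   # paths:  int32   (b, t_y, t_x), zero-initialised, written in place
 *   # values: float32 (b, t_y, t_x), scores, accumulated in place
 *   # t_ys, t_xs: int32 (b,), actual lengths of each batch item
 *   from monotonic_align.core import maximum_path_c
 *   maximum_path_c(paths, values, t_ys, t_xs)
 */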
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0};
PyObject* values[4] = {0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("maximum_path_c", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) }
__pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_values, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
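/* Everything from here on is Cython's generic View.MemoryView utility code
 * (the cython.array type, memoryview wrappers, and Enum pickling helpers),
 * emitted into every module that uses typed memoryviews; none of it is
 * specific to monotonic_align. */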
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
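/* Implementation of cython.array.__cinit__: validates that the shape tuple
 * is non-empty, itemsize is positive, and format is (or is encoded to) an
 * ASCII bytes object; allocates a single PyObject_Malloc block holding both
 * _shape and _strides; checks every dimension is positive; and records the
 * memory order ('C' or 'F') implied by `mode`. */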
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
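/*
 * mode selects the memory layout: 'fortran' maps to column-major order
 * ('F'), 'c' to row-major order ('C'), and anything else raises
 * ValueError. The one-character order code is what
 * fill_contig_strides_array consumes below.
 */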
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
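/*
 * fill_contig_strides_array writes contiguous strides for the chosen
 * order into self._strides and returns the total buffer size in bytes
 * (itemsize times the product of all extents), which is stored as
 * self.len.
 */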
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
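/*
 * For object buffers (format 'O') every slot must hold a valid owned
 * reference before the array can be used, so the loop below stores
 * Py_None in each of the len/itemsize slots and increments its refcount
 * once per slot; the guarded division above rejects itemsize == 0 and
 * the one overflow-prone corner case.
 */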
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
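/*
 * __getbuffer__ implements the PEP 3118 buffer protocol for array: it
 * checks that the consumer's flags accept this array's contiguity, then
 * fills the Py_buffer struct directly from the cached _shape/_strides
 * arrays, so no per-request allocation is needed.
 */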
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
  __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); /* provisional owner; replaced with self on success, cleared again on either exit path */
  __Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
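/*
 * __dealloc__ releases the data buffer: a user-supplied
 * callback_free_data takes precedence; otherwise, if this array owns its
 * buffer (free_data), object slots are first decref'd via
 * refcount_objects_in_slice and the buffer is free()'d. The combined
 * shape/strides block is always returned with PyObject_Free, matching
 * the PyObject_Malloc in __cinit__.
 */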
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
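/*
 * The memview property wraps the array in a full memoryview object,
 * requesting a writable, format-carrying, any-contiguous buffer
 * (PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE); the generic
 * sequence behaviour below is delegated to that view.
 */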
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
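/*
 * __len__ reads the first extent directly from _shape; __getattr__,
 * __getitem__ and __setitem__ below simply forward to self.memview, so
 * the array behaves like the memoryview it wraps.
 */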
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
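/*
 * Pickling is explicitly unsupported for array: because __cinit__ takes
 * required arguments, the generated __reduce_cython__ and
 * __setstate_cython__ stubs below just raise TypeError.
 */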
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
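/*
 * array_cwrapper (exposed as __pyx_array_new) is the C-level
 * constructor: with buf == NULL it lets array.__cinit__ malloc the data
 * buffer; otherwise it passes allocate_buffer=False and adopts the
 * caller's buffer, which the array then does not own (free_data stays
 * false, so __dealloc__ will not free it).
 */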
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
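/*
 * Enum is a tiny sentinel class: instances such as
 * generic = Enum("<strided and direct or indirect>") serve as named
 * constants identifying memoryview access kinds, and __repr__ simply
 * returns the stored name.
 */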
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
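/*
 * Unlike array, Enum is picklable: __reduce_cython__ below emits a call
 * to __pyx_unpickle_Enum together with a checksum (0xb068931, i.e. the
 * interned int 184977713) that guards against unpickling into a class
 * whose field layout has changed.
 */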
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
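/*
 * align_pointer rounds an address up to the next multiple of alignment:
 * with offset = p % alignment, a non-zero offset adds alignment - offset,
 * e.g. p = 0x1003 with alignment 8 yields 0x1008. Note that alignment is
 * assumed non-zero; the cdivision modulo would otherwise be undefined.
 */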
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
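/*
 * Illustration (a sketch, not part of the generated module):
 * __pyx_align_pointer rounds an address up to the next multiple of
 * `alignment`. Worked example with memory = 0x1003 and alignment = 8:
 *
 *     offset    = 0x1003 % 8 = 3
 *     aligned_p = 0x1003 + (8 - 3) = 0x1008   // next 8-byte boundary
 *
 * An already-aligned pointer (offset == 0) is returned unchanged. The
 * modulo form works for any nonzero alignment, though alignments are
 * powers of two in practice.
 */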
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
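/*
 * Notes on the generated __cinit__ above:
 *  - THREAD_LOCKS_PREALLOCATED from the Cython source has been inlined as
 *    the literal 8 in the `__pyx_memoryview_thread_locks_used < 8` test.
 *    A new memoryview first reuses a slot from the preallocated lock pool
 *    and falls back to PyThread_allocate_lock() when the pool is exhausted
 *    or the slot is NULL, raising MemoryError if that allocation fails.
 *  - If the buffer exporter left view.obj NULL, it is set to Py_None with
 *    a matching Py_INCREF so that releasing the view stays symmetric.
 *  - Under PyBUF_FORMAT, dtype_is_object is inferred from the format
 *    string being exactly "O"; otherwise the constructor argument is used.
 */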
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
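/*
 * The lock teardown above is the lowered for/else from the Cython source:
 * when self.lock is found at pool index i, the in-use count is decremented
 * and the lock is swapped with the last in-use slot (when it is not
 * already last), keeping the pool densely packed; this is an O(1) return
 * to the pool, and the `break` skips the else branch. Only locks that
 * were allocated outside the pool reach PyThread_free_lock().
 */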
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
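/*
 * get_item_pointer is the lowered `for dim, idx in enumerate(index)`:
 * exact lists and tuples take the indexed fast path, anything else goes
 * through tp_iternext with StopIteration swallowed to terminate the loop.
 * Each idx is converted to Py_ssize_t and handed to __pyx_pybuffer_index,
 * which returns the item pointer advanced into that dimension, or NULL
 * on error.
 */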
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
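/*
 * Dispatch summary for __getitem__: `...` returns self unchanged;
 * otherwise _unellipsify expands the index against view.ndim and returns
 * a (have_slices, indices) pair, unpacked here with exact-size checks.
 * Any slice component routes to memview_slice, producing a new
 * memoryview; a purely integral index resolves to a raw item pointer and
 * is converted back to a Python object via convert_item_to_object.
 */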
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
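/*
 * __setitem__ mirrors __getitem__: a read-only buffer raises TypeError up
 * front (__pyx_tuple__9 is the prebuilt argument tuple for that
 * exception). After _unellipsify rebinds index, slice targets take one of
 * two paths depending on is_slice(value): memoryview-to-memoryview copy
 * via setitem_slice_assignment, or scalar broadcast via
 * setitem_slice_assign_scalar; plain integer indices go through
 * setitem_indexed.
 */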
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
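/*
 * is_slice coerces a non-memoryview value into one by calling
 * memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object): writability is stripped since the source is only
 * read, and any contiguity is accepted. A TypeError from objects that do
 * not export a buffer is swallowed and None is returned, which the caller
 * treats as "not slice-assignable, fall back to scalar assignment".
 */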
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
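/*
 * setitem_slice_assignment unpacks both operands into C-level
 * __Pyx_memviewslice structs with get_slice_from_memview, reads the
 * Python-level .ndim of each side, and delegates the element-wise work to
 * memoryview_copy_contents (which, in Cython's utility code, also covers
 * broadcasting and object-dtype refcounting), with failure reported
 * as -1.
 */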
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
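/*
 * setitem_slice_assign_scalar packs one item's bytes and broadcasts them
 * over the destination slice: itemsizes that fit in the 128-int scratch
 * array stay on the stack, larger ones fall back to PyMem_Malloc with
 * MemoryError on failure. Object dtypes store the PyObject pointer
 * directly; other dtypes are packed through assign_item_from_object. The
 * try/finally from the Cython source is lowered into the exception
 * save/restore block above, so PyMem_Free(tmp) runs on both the normal
 * and the error path.
 */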
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
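/* setitem_indexed is the scalar assignment path: get_item_pointer resolves
 * the element address from the index (raising on failure), and
 * assign_item_from_object converts the Python value into the raw buffer.
 * Both calls go through the memoryview vtable, presumably so that
 * _memoryviewslice subclasses can substitute their own conversion. */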
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
  __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp, __pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
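    /* The dispatch above is the generic Cython call pattern for
     * struct.unpack(self.view.format, bytesitem): when the callee is a bound
     * method, self is prepended to the argument vector;
     * __Pyx_PyFunction_FastCall and __Pyx_PyCFunction_FastCall skip building
     * an argument tuple where the runtime supports it, with PyTuple_New plus
     * __Pyx_PyObject_Call as the portable fallback. */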
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
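/* convert_item_to_object is the slow, format-string based read path, used
 * (per the docstring) only when the memoryview is instantiated manually or
 * Cython cannot convert the element type itself. It slices out itemsize
 * bytes, unpacks them with struct.unpack(self.view.format, ...), translates
 * struct.error into ValueError, and returns the bare scalar when the format
 * string is a single character, otherwise the full result tuple. */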
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
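/* assign_item_from_object is the matching write path: tuple values are
 * splatted into struct.pack(self.view.format, *value), anything else is
 * packed as a single argument, and the resulting bytes are copied into
 * itemp one char at a time; the enumerate() loop in the source compiles
 * down to a raw pointer walk over PyBytes_AS_STRING(bytesvalue). */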
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
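/* __getbuffer__ implements the PEP 3118 buffer protocol for the memoryview
 * wrapper itself: writable requests on read-only views are rejected, and
 * shape, strides, suboffsets and format are exported only when the consumer
 * asked for them via PyBUF_ND, PyBUF_STRIDES, PyBUF_INDIRECT and
 * PyBUF_FORMAT respectively. info->obj takes a new reference to self so the
 * exporter outlives the exported Py_buffer. */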
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
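/* The T property returns a transposed view: memoryview_copy duplicates the
 * object, then __pyx_memslice_transpose flips the slice in place. Judging by
 * the `== 0` check, the transpose helper signals failure with a zero return
 * value (an `except 0` cdef function). */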
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
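/* The shape property materialises the first ndim entries of the Py_buffer
 * shape array as a Python tuple; the list comprehension in the source is
 * compiled to a raw Py_ssize_t pointer walk plus PyList_AsTuple. The strides
 * and suboffsets properties below follow the same pattern. */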
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
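/* Unlike shape, strides may legitimately be absent (a consumer that
 * requested only PyBUF_ND receives strides == NULL), so the property raises
 * ValueError rather than guessing. */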
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
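/* suboffsets == NULL is the common, non-indirect case; the property then
 * returns the conventional placeholder (-1,) * ndim instead of raising,
 * since a suboffset of -1 means "no pointer dereference" in the buffer
 * protocol. */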
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
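/* size is the element count, computed lazily as the product of the shape
 * entries and cached in self._size, so repeated accesses (e.g. from nbytes
 * above, which multiplies size by itemsize) do not recompute it. */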
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
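/* __len__ reports the length of the first dimension and falls back to 0 for
 * zero-dimensional views, where shape[0] does not exist. */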
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
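/*
 * Illustrative sketch (not generated by Cython): is_c_contig() and
 * is_f_contig() above both reduce to a contiguity test on the underlying
 * slice; the public buffer API exposes the same check directly on a filled
 * Py_buffer ('C' = row-major, 'F' = column-major, 'A' = either).
 */
static CYTHON_UNUSED int
__pyx_example_is_contig(const Py_buffer *view, char order)
{
    return PyBuffer_IsContiguous(view, order);
}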
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
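/*
 * Illustrative sketch (not generated by Cython): copy() and copy_fortran()
 * above differ only in which contiguity bit they clear and which they
 * request from slice_copy_contig; the flag arithmetic is equivalent to:
 */
static CYTHON_UNUSED int
__pyx_example_copy_flags(int flags, int c_order)
{
    if (c_order)
        return (flags & ~PyBUF_F_CONTIGUOUS) | PyBUF_C_CONTIGUOUS; /* copy() */
    return (flags & ~PyBUF_C_CONTIGUOUS) | PyBUF_F_CONTIGUOUS; /* copy_fortran() */
}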
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
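/*
 * Note (not generated by Cython): memoryview.__cinit__ takes a live buffer
 * exporter, so there is no sensible default pickle state; both
 * __reduce_cython__ and __setstate_cython__ above therefore raise
 * TypeError unconditionally.
 */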
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
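/*
 * Illustrative sketch (not generated by Cython): the tuple packing above
 * implements the constructor call memoryview(o, flags, dtype_is_object);
 * with PyObject_CallFunction the same call is one line ("Oii" = one object
 * plus two C ints). This covers only the constructor portion; the generated
 * wrapper additionally stores the typeinfo pointer on the result.
 */
static CYTHON_UNUSED PyObject *
__pyx_example_new_memoryview(PyObject *o, int flags, int dtype_is_object)
{
    return PyObject_CallFunction(((PyObject *)__pyx_memoryview_type), "Oii",
                                 o, flags, dtype_is_object);
}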
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
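/*
 * Illustrative sketch (not generated by Cython): __Pyx_TypeCheck is a
 * subtype-aware test, so memoryview_check() matches isinstance()
 * semantics; the public-API spelling of the same check is:
 */
static CYTHON_UNUSED int
__pyx_example_memoryview_check(PyObject *o)
{
    return PyObject_TypeCheck(o, __pyx_memoryview_type);
}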
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
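/*
 * Worked examples (not generated by Cython) for ndim == 3; the first
 * element of the returned pair is "have_slices or nslices":
 *   _unellipsify(Ellipsis, 3)      -> (True, (slice(None), slice(None), slice(None)))
 *   _unellipsify((0, Ellipsis), 3) -> (True, (0, slice(None), slice(None)))
 *   _unellipsify((0, 1), 3)        -> (1,    (0, 1, slice(None)))
 */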
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
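/*
 * Note (not generated by Cython): per PEP 3118, suboffsets[i] >= 0 marks
 * dimension i as indirect (each step along it dereferences a pointer, as
 * in PIL-style buffers); callers use assert_direct_dimensions() to reject
 * such buffers before operations that assume direct addressing.
 */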
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
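    /*
     * Note (not generated by Cython): "index.start or 0" maps both None
     * and 0 to 0 in the three assignments above; the have_start/have_stop/
     * have_step flags computed next preserve the None-vs-0 distinction for
     * slice_memviewslice().
     */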
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
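/*
 * Worked example (not generated by Cython): for a 2-D memoryview m,
 * m[1, ::2] takes the PyIndex_Check branch for dim 0 (dimension dropped,
 * new_ndim unchanged) and the slice branch for dim 1, so new_ndim == 1;
 * m[None, :] takes the "index is None" branch and inserts a length-1
 * dimension with stride 0.
 */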
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
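/* Editorial note (illustrative, not part of the generated code):
 * The branches above reproduce CPython's slice.indices() semantics for a
 * single axis.  When no explicit start/stop is given, the defaults depend
 * on the sign of the step, e.g. for shape == 5:
 *
 *     >>> slice(None, None,  1).indices(5)   # start=0, stop=5,  step=1
 *     (0, 5, 1)
 *     >>> slice(None, None, -1).indices(5)   # start=4, stop=-1, step=-1
 *     (4, -1, -1)
 *
 * which matches the start = shape - 1 / stop = -1 pair assigned above for
 * a negative step.
 */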
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
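/* Editorial note (illustrative, not part of the generated code):
 * The two statements below compute ceil((stop - start) / step) using only
 * truncating C division (the Cython source wraps this in
 * cython.cdivision(True)): the truncated quotient is bumped by one
 * whenever the division leaves a remainder.  For example, with start=0,
 * stop=5, step=2 the truncated quotient is 2, the remainder is nonzero,
 * so new_shape becomes 3 -- matching len(range(0, 5, 2)).
 */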
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
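/* Editorial note (illustrative sketch, not part of the generated code):
 * slice_memviewslice() processes a single axis of a memoryview subscript.
 * An integer index (is_slice == 0) is only bounds-checked here and then
 * folded into dst.data (or the parent's suboffset) by the common code
 * after the branch; a slice additionally normalizes start/stop/step as
 * above and writes one entry each of dst.shape, dst.strides and
 * dst.suboffsets.  In Cython terms, roughly:
 *
 *     cdef double[:, :] m = ...
 *     m[1:5:2, 3]   # axis 0 takes the slice path, axis 1 the index path
 */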
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
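/* Editorial note (illustrative, not part of the generated code):
 * pybuffer_index() resolves one indexing step on a raw Py_buffer:
 *
 *     resultp = bufp + index * stride                # direct access
 *     resultp = (<char **> resultp)[0] + suboffset   # indirect access
 *
 * The suboffset branch implements the pointer dereference that PEP 3118
 * specifies for indirect (PIL-style) buffers; a negative suboffset marks
 * the dimension as direct, so no dereference is needed.
 */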
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
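/* Editorial note (illustrative, not part of the generated code):
 * transpose_memslice() reverses the axis order of a slice in place by
 * swapping shape[i]/shape[j] and strides[i]/strides[j] for the ndim/2
 * outermost pairs; no element data is moved.  This is the usual strided
 * view transpose: a C-contiguous (3, 4) slice of doubles with strides
 * (32, 8) becomes a (4, 3) slice with strides (8, 32).  Indirect
 * dimensions (suboffset >= 0) cannot be transposed this way, hence the
 * ValueError raised above.
 */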
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
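/* Editorial note (illustrative, not part of the generated code):
 * convert_item_to_object()/assign_item_from_object() are the two halves
 * of _memoryviewslice's dtype bridge: to_object_func/to_dtype_func are
 * optional per-dtype converters installed when the slice object is built
 * (see memoryview_fromslice below), and each method falls back to the
 * generic format-string-based implementation in the memoryview base
 * class whenever its function pointer is NULL.
 */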
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
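/* Editorial note (illustrative, not part of the generated code):
 * memoryview_fromslice() wraps a C-level __Pyx_memviewslice in a fresh
 * _memoryviewslice object: it copies the parent's Py_buffer view, then
 * repoints buf/shape/strides (and suboffsets, only when some axis is
 * indirect) at the slice's own arrays, recomputes view.len as
 * itemsize * prod(shape), and selects PyBUF_RECORDS or PyBUF_RECORDS_RO
 * depending on whether the parent buffer was writable.  This is the
 * object ultimately returned by a slicing expression such as m[1:, ::2].
 */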
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
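/* Editorial note (a sketch under the names above, not part of the
 * generated code): get_slice_from_memview() gives callers a uniform
 * __Pyx_memviewslice pointer.  A _memoryviewslice already owns one
 * (from_slice), so its address is returned directly; a plain memoryview
 * has its Py_buffer metadata copied into the caller-supplied mslice via
 * slice_copy().  Hypothetical calling pattern:
 *
 *     __Pyx_memviewslice tmp;
 *     __Pyx_memviewslice *p =
 *         __pyx_memoryview_get_slice_from_memoryview(mv, &tmp);
 */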
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
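/* Editorial note (illustrative, not part of the generated code):
 * slice_copy() flattens a memoryview's Py_buffer into the fixed-size
 * __Pyx_memviewslice layout used by nogil code: per-dimension shape,
 * strides and suboffsets arrays plus a raw data pointer.  A NULL
 * view.suboffsets (the common, fully direct case) is expanded to -1 in
 * every dimension, the buffer protocol's "no indirection" marker.
 */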
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
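/*
 * get_best_order: picks 'C' or 'F' by comparing the stride of the last
 * non-degenerate dimension (c_stride) with that of the first one (f_stride).
 * E.g. for a 2x3 C-contiguous array of 8-byte items, strides are {24, 8}:
 * |c_stride| = 8 <= |f_stride| = 24, so 'C' is returned. Dimensions of
 * extent 1 are skipped because their strides carry no ordering information.
 */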
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
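/*
 * _copy_strided_to_strided: element-wise copy that recurses one dimension at
 * a time. In the 1-D base case it collapses to a single memcpy when both
 * strides are positive and equal the itemsize (i.e. the data is contiguous);
 * otherwise it copies item by item, advancing src/dst by their respective
 * strides. Matching extents are assumed to have been validated by the caller.
 */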
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
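/*
 * slice_get_size: byte size of the slice, computed as
 * itemsize * shape[0] * ... * shape[ndim-1]. For a 2x3 slice of 8-byte items
 * this yields 8 * 2 * 3 = 48 bytes. The result reflects the logical extent
 * only; strides (and thus any gaps in the underlying buffer) are ignored.
 */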
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
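/*
 * fill_contig_strides_array: writes the strides of a contiguous array of the
 * given shape into `strides`, starting from `stride` (normally the itemsize),
 * and returns the total byte size. 'F' order grows the stride outwards from
 * the first dimension; any other order is treated as 'C' and grows it from
 * the last. E.g. shape {2, 3}, stride 8, order 'C' yields strides {24, 8}
 * and returns 48.
 */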
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
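/*
 * copy_data_to_temp: malloc()s a contiguous scratch buffer of
 * slice_get_size(src, ndim) bytes, points *tmpslice at it with freshly
 * computed contiguous strides in `order` (dimensions of extent 1 get stride 0
 * and every suboffset is cleared to -1), then copies src into it, via a
 * single memcpy when src is already contiguous in that order. Returns the
 * buffer, or NULL after raising MemoryError; the caller owns the buffer and
 * must free() it.
 */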
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
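/*
 * _err_extents / _err_dim / _err: error helpers for the nogil copy routines.
 * Each re-acquires the GIL ("with gil" in the Cython source), formats and
 * raises the requested exception, and propagates failure through the
 * `except -1` return value.
 */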
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
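/*
 * memoryview_copy_contents: the central copy routine. The steps below are:
 *   1. broadcast_leading() pads whichever slice has fewer dimensions;
 *   2. per-dimension checks: mismatched extents are allowed only where the
 *      source extent is 1 (its stride is forced to 0, i.e. broadcasting),
 *      and indirect (suboffset >= 0) dimensions are rejected;
 *   3. overlapping memory is handled by staging src in a temporary buffer
 *      obtained from copy_data_to_temp();
 *   4. if both slices are contiguous in the same order and no broadcasting
 *      occurred, one memcpy suffices; otherwise, when both slices favour
 *      Fortran order, both are transposed so the strided copy traverses the
 *      contiguous dimension innermost, and refcount_copying() brackets the
 *      raw copy whenever the elements are Python objects.
 * Returns 0 on success, -1 after raising an exception.
 */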
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
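/*
 * broadcast_leading: grows mslice from `ndim` to `ndim_other` dimensions by
 * shifting shape/strides/suboffsets towards the back and prepending length-1
 * dimensions (stride taken from the original strides[0], suboffset -1).
 * E.g. a shape {5} slice broadcast against a 3-D target becomes {1, 1, 5}.
 */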
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
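/*
 * refcount_copying: bracket for the raw memory copies above. When the element
 * type is a Python object, the bytes being overwritten hold owned PyObject
 * pointers, so the destination's old references are released before the copy
 * (inc=False) and the freshly copied ones are claimed afterwards (inc=True);
 * for plain C dtypes this is a no-op.
 */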
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
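/*
 * refcount_objects_in_slice: recursive walk over an object slice. At
 * ndim == 1 each element is reinterpreted as a PyObject* and INCREF'd or
 * DECREF'd; higher dimensions recurse with shape+1/strides+1. Either way
 * `data` advances by strides[0] between iterations. Runs with the GIL held
 * (see the _with_gil wrapper above).
 */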
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
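/* Note: the walk above recurses one dimension at a time. Roughly, as an
 * illustrative sketch (not part of the generated module):
 *
 *     for i in range(shape[0]):
 *         if ndim == 1: INCREF/DECREF(*(PyObject **)data)
 *         else:         recurse(data, shape+1, strides+1, ndim-1, inc)
 *         data += strides[0]
 *
 * At ndim == 1 each element is a PyObject pointer, so the stride-based
 * advance visits every object in a possibly non-contiguous slice exactly
 * once. */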
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
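/* Note: the DECREF-before / INCREF-after bracket is deliberate. The first
 * refcount_copying(..., inc=False) releases the objects about to be
 * overwritten; _slice_assign_scalar then copies the item into every cell;
 * the closing refcount_copying(..., inc=True) accounts for the new
 * references created by those copies. For non-object dtypes both calls
 * are no-ops and the function remains purely nogil. */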
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
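/* Note: same recursion scheme as the refcount walker above, but the leaf
 * operation is memcpy(data, item, itemsize) instead of a refcount
 * adjustment. Because the data pointer advances by strides[0] on every
 * iteration, the fill also works for strided, non-contiguous slices. */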
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
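/* Note: this is Cython's standard METH_VARARGS|METH_KEYWORDS wrapper.
 * The switch over pos_args copies positional arguments into values[],
 * keyword lookup fills any remainder, and __Pyx_RaiseArgtupleInvalid
 * reports a wrong argument count before the typed implementation below
 * is entered. */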
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
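/* Note: the 0xb068931 literal is the checksum Cython computed for the
 * Enum type's pickled layout; unpickling data produced by an incompatible
 * build raises PickleError instead of constructing a corrupt object.
 * Illustrative Python-level round trip (hypothetical, for exposition
 * only):
 *
 *     import pickle
 *     e2 = pickle.loads(pickle.dumps(e))  # ends up calling
 *                                         # __pyx_unpickle_Enum(type,
 *                                         #     checksum, state)
 */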
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
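/* Note: __set_state restores the Enum from its pickled tuple: slot 0
 * becomes .name, and when the tuple carries a second entry and the
 * instance has a __dict__, that dict is updated from slot 1. The explicit
 * Py_None checks mirror the TypeErrors CPython itself would raise for a
 * None subscript or len(). */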
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
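/* Note: the tp_new slots in this file follow one template: allocate via
 * tp_alloc (or PyBaseObject_Type.tp_new for abstract subclasses), install
 * the vtable pointer, default every object-valued field to Py_None, then
 * run the Cython __cinit__; any failure unwinds through the "bad" label
 * so the half-built instance is released. */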
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
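/* Note: the temporary refcount bump around __pyx_array___dealloc__ keeps
 * the object alive while user-level __dealloc__ code runs, so an
 * accidental resurrection cannot lead to a double free, and
 * PyErr_Fetch/PyErr_Restore preserves any exception already in flight
 * across that call. */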
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
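/* Note: the preprocessor-gated entries keep one static PyTypeObject
 * initializer valid across CPython versions: tp_print exists only before
 * 3.8, tp_vectorcall_offset takes its slot from 3.8, and
 * tp_finalize/tp_vectorcall appear behind their respective version
 * checks. The same pattern repeats for the Enum, memoryview and
 * _memoryviewslice types below. */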
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
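/* Note: tp_clear resets the object-valued fields to Py_None rather than
 * NULL, so code that runs after the GC breaks a reference cycle still
 * sees valid (if empty) attributes; view.obj, the buffer's owner
 * reference, is the one field dropped to NULL via Py_CLEAR. */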
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
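/* Note: the _memoryviewslice traverse/clear/dealloc slots chain to the
 * memoryview base versions and then handle the fields the subclass adds:
 * from_object, and the nested from_slice released here through
 * __PYX_XDEC_MEMVIEW. The dealloc above re-tracks the object with
 * PyObject_GC_Track before delegating, because the base dealloc untracks
 * it again. */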
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0},
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_core(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_core},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"core",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
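/* Note: under CYTHON_PEP489_MULTI_PHASE_INIT the module is created
 * through the Py_mod_create/Py_mod_exec slots (PEP 489 multi-phase
 * initialization), which allows per-interpreter re-initialization; the
 * legacy single-phase path instead uses m_size == -1 with no slot
 * table. */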
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1},
{&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
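/* Note: each __Pyx_StringTabEntry records a pointer to the cached Python
 * object, the C literal, its byte length, and flags marking encoding,
 * unicode-ness, str-ness, and interning. A string-initialization helper
 * defined elsewhere in this generated file walks the table once at import
 * time, so every literal referenced above is a shared, pre-built Python
 * object. */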
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
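/* Note: builtins referenced from the Cython sources (range, ValueError,
 * MemoryError, and so on) are looked up once at module init and cached as
 * globals; the (file, line) pairs passed to __PYX_ERR point back at the
 * first use of each name in the original .pyx / View.MemoryView
 * sources. */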
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
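/* Note on the constant table built above: each tuple is packed once at
import time and reused on every subsequent raise from that site, so e.g. the
repeated "no default __reduce__ due to non-trivial __cinit__" TypeErrors
never rebuild their argument tuples. The __Pyx_GOTREF/__Pyx_GIVEREF pairs
compile to no-ops unless CYTHON_REFNANNY is defined, in which case they
drive the reference-count debugger. */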
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
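/* __Pyx_InitGlobals caches the integer objects this module needs (0, 1, -1,
and 184977713, the pickle checksum compared by __pyx_unpickle_Enum) so each
is allocated exactly once. A minimal sketch of the same caching idiom,
assuming only <Python.h>; the names below are illustrative, not part of this
module: */
#if 0
#include <Python.h>

static PyObject *cached_zero;      /* created once, owned by the module */

static int init_cached_ints(void)
{
cached_zero = PyLong_FromLong(0);  /* new reference, kept for the module's lifetime */
return cached_zero ? 0 : -1;
}

static PyObject *get_zero(void)
{
Py_INCREF(cached_zero);            /* hand each caller its own reference */
return cached_zero;
}
#endif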
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
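/* The vtable wiring above is how Cython exposes C-level methods without a
Python call: function pointers are gathered in a static struct, PyType_Ready
finalizes the type, and __Pyx_SetVtable stores the struct's address in
tp_dict so subclass init code can copy it (_memoryviewslice copies
*__pyx_vtabptr_memoryview and overrides the two item-conversion entries).
A sketch of the pattern with illustrative names: */
#if 0
struct demo_vtab { PyObject *(*convert)(PyObject *self, char *itemp); };
static struct demo_vtab demo_vtable;

static PyObject *demo_convert(PyObject *self, char *itemp);  /* C-level impl */

static int demo_type_init(PyTypeObject *tp)
{
demo_vtable.convert = demo_convert;  /* fill the C-level slot */
if (PyType_Ready(tp) < 0) return -1; /* finalize before touching tp_dict */
/* a real module would now stash &demo_vtable in tp->tp_dict (Cython wraps
the pointer in a capsule) so subclass init code can fetch and extend it */
return 0;
}
#endif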
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initcore(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_core(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
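/* The single-interpreter guard exists because this module keeps C-level
global state (__pyx_m, the cached constants, the preallocated memoryview
locks); loading it into a second (sub)interpreter would share that state
across interpreters, so the import is refused instead. */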
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
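/* Under PEP 489 multi-phase init, import is split in two: __pyx_pymod_create
builds an empty module object and copies loader/origin/parent/
submodule_search_locations from the import spec into __loader__/__file__/
__package__/__path__, and __pyx_pymod_exec_core (below) then runs the actual
module body against that object. */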
static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets  /* sic: "__Pxy" is the macro's actual spelling in the Cython runtime */
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_monotonic_align__core) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "monotonic_align.core")) {
if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
__pyx_k_ = (-1e9);
/* "monotonic_align/core.pyx":1
* cimport cython # <<<<<<<<<<<<<<
* from cython.parallel import prange
*
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init monotonic_align.core");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
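/* Execution order inside the init function above: interned strings and
cached ints (__Pyx_InitGlobals), cached builtins, the constant tuples, the
type/vtable setup, and finally the translated module body, i.e. the
pyx-level assignments such as __pyx_k_ = -1e9 and the View.MemoryView Enum
singletons and thread locks. */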
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
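/* Refnanny's C API crosses the module boundary as a plain integer: the
provider publishes the address of its API struct with PyLong_FromVoidPtr,
and the importer above recovers it with PyLong_AsVoidPtr. Provider-side
sketch under that assumption (names and error handling are illustrative): */
#if 0
static __Pyx_RefNannyAPIStruct api;  /* function pointers filled in elsewhere */

static int export_refnanny_api(PyObject *m)
{
PyObject *p = PyLong_FromVoidPtr(&api);            /* box the address as an int */
if (!p) return -1;
if (PyModule_AddObject(m, "RefNannyAPI", p) < 0) { /* steals p on success */
Py_DECREF(p);                                      /* still ours on failure */
return -1;
}
return 0;
}
#endif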
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* MemviewSliceInit */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
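/* When the exporter leaves buf->strides NULL, PEP 3118 defines the buffer
as C-contiguous, so the else-branch above synthesizes strides from the
itemsize outward. Worked example (assumed values): ndim = 2, shape = {3, 4},
itemsize = 8 gives strides[1] = 8 and strides[0] = 8 * 4 = 32; suboffsets
are set to -1 to mark every dimension as direct. */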
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
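/* Invariant behind __Pyx_INC_MEMVIEW/__Pyx_XDEC_MEMVIEW: the acquisition
count tracks how many C-level slices alias one memoryview object. The first
acquisition takes a single Py_INCREF on the owner and the last release
drops it (Py_CLEAR), so nogil code can copy slices freely while only the
0<->1 transitions need the GIL (taken on demand via PyGILState_Ensure). */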
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
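/* Keyword matching above is two-pass: pointer identity (**name == key)
catches interned names for free, then a length check and PyUnicode_Compare
handle the general case. The positional-argument names in front of
first_kw_arg are scanned only to diagnose duplicates, reported through
__Pyx_RaiseDoubleKeywordsError as "got multiple values for keyword
argument". */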
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
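/* The Python 3 branch of __Pyx_Raise reproduces the statement
`raise <class-or-instance> from <cause>`: it instantiates the exception
class when only a class was given, validates the traceback argument,
attaches the cause with PyException_SetCause, and installs everything with
PyErr_SetObject plus a direct write of curexc_traceback. */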
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
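/* __Pyx_PyCFunction_FastCall dispatches to METH_FASTCALL targets, which
receive a C array of arguments instead of a freshly built tuple. A minimal
method using that convention (sketch, assuming <Python.h>; names are
illustrative): */
#if 0
static PyObject *identity_fast(PyObject *self, PyObject *const *args, Py_ssize_t nargs)
{
if (nargs != 1) {
PyErr_SetString(PyExc_TypeError, "identity_fast() takes exactly one argument");
return NULL;
}
Py_INCREF(args[0]);                /* no argument tuple is ever allocated */
return args[0];
}

static PyMethodDef identity_def = {
"identity_fast",
(PyCFunction)(void (*)(void))identity_fast,  /* cast silences -Wcast-function-type */
METH_FASTCALL,
NULL
};
#endif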
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result = __Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
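/* Fast paths in __Pyx_PyBytes_Equals, in order: pointer identity, length,
first byte, then the cached hashes (ob_shash; -1 means "not computed yet",
so two different valid hashes prove inequality without touching the data),
and only then the full memcmp. */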
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
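/* __Pyx_div_Py_ssize_t implements Python's floor division on top of C's
truncating division: when the remainder is nonzero and the operands have
opposite signs ((r ^ b) < 0 is true exactly when r and b differ in sign),
the truncated quotient is one too large. Worked example: a = -7, b = 2
gives q = -3, r = -1; the signs differ, so q is corrected to -4, matching
Python's -7 // 2 == -4. */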
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
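/* The wraparound/boundscheck parameters mirror the @cython.wraparound(False)
and @cython.boundscheck(False) directives quoted from core.pyx above: they
arrive as compile-time constants at every call site, so the compiler can
drop the negative-index and bounds-check branches entirely in this module. */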
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
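/* Decodes the byte range [start, stop) of a C string to unicode, resolving negative offsets against strlen() like Python slicing; an empty range returns the shared empty unicode object. */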
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
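/* CPython dict version tags make cached lookups cheap to revalidate: a previously cached value is still current as long as both the type dict's and the instance dict's version numbers are unchanged. */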
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetTopmostException */
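/* Walks the thread state's exc_info stack past already-cleared entries to find the exception that is actually being handled. */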
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
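/* Saves and restores the "currently handled" exception (sys.exc_info); the reset path installs the new triple before releasing the old one so the state stays consistent even if a destructor runs during the decref. */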
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
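/* Fetches and normalizes the pending exception, attaches the traceback to the value (Python 3), publishes it as the handled exception, and hands owned references back to the caller. */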
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
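/* __import__ wrapper: on Python 3 a level of -1 first attempts a package-relative import (level 1) for dotted module names, clearing ImportError on failure, then retries absolutely; Python 2 calls the builtin __import__ object. */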
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
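/* Subclass and exception-match checks that scan tp_mro (or the tp_base chain when no MRO is set) directly instead of calling PyObject_IsSubclass, avoiding its overhead; the Python 2 variant preserves any pending exception around the slow call. */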
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
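/* non-exception-class entries in the tuple are simply skipped */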
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
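/* exc_type is neither a class nor a tuple: defer to the generic check below */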
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
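/* Addition with a compile-time-known integer constant: exact PyInt/PyLong operands with at most four digits are unpacked into a C long (or long long where available) and added inline, floats take a direct double path, and everything else falls back to nb_add / PyNumber_Add. */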
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* None */
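/* Same floor-division adjustment as __Pyx_div_Py_ssize_t above, specialized for long. */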
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
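/* getattr on an imported module that rewrites AttributeError into the "cannot import name" ImportError Python itself raises for 'from x import y'. */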
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
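/* Pre-3.7 fast path for generic attribute lookup on extension types without an instance dict: the descriptor is resolved on the type and its tp_descr_get is invoked directly. */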
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
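/* Pickle wiring: when a type has no __getstate__ and inherits object's __reduce_ex__, the generated __reduce_cython__/__setstate_cython__ methods are moved into the type dict as __reduce__/__setstate__ so instances pickle correctly. */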
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* CLineInTraceback */
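/* Decides whether the C source line is included in Python tracebacks, controlled by the cython_runtime.cline_in_traceback attribute (off by default); the pending exception is saved and restored around the lookup so it is not clobbered. */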
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
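/* Cache of synthetic code objects keyed by line number (C lines are stored negated), kept as a sorted array that is probed by binary search and grown in blocks of 64 entries. */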
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
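/* Fabricates a PyCodeObject/PyFrameObject pair so that C-level failures show up as ordinary Python traceback entries with the correct file, function, and line. */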
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
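/* Checks contiguity of a slice in the requested order ('C' or 'F') by walking the dimensions and verifying that each stride equals the running product of itemsize and the extents seen so far. */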
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
/* OverlappingSlices */
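/* Computes the [start, end) byte extent actually covered by a slice (negative strides move the start backwards), so two slices can be tested for overlap as a simple interval intersection. */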
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* IsLittleEndian */
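/* Runtime endianness probe via type punning: on a little-endian machine the lowest-addressed byte of 0x01020304 is 0x04. */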
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
/* BufferFormatCheck */
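/* Validates a PEP 3118 / struct-module format string against the expected element type: the parser tracks pack mode, alignment, repeat counts, nested structs ('T{...}') and array shapes, raising ValueError on any mismatch. */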
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
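/* bytes-like ('H') entries match on size alone, so nothing more to check */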
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace (advancing ts avoids an infinite loop) */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* TypeInfoCompare */
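/* Structural equality of buffer type descriptions: size, type group, signedness, dimensionality, array shapes and (for struct types) every field must agree; entries in the 'H' group compare by size only. */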
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
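/* Validates that a buffer matches the requested memoryview layout before initializing the slice: dimensionality, per-dimension stride and suboffset specs, itemsize against the dtype, and overall C/Fortran contiguity where requested. */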
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i >= 0; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
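/* Coerces a Python object (or None) into a typed, C-contiguous memoryview slice; the axis specs below encode direct access with a contiguous innermost dimension, and None yields a slice whose memview field is Py_None. */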
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* CIntFromPyVerify */
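/* Verifies that a value fetched as a wider C type survives narrowing to the target type, jumping to the raise_neg_overflow/raise_overflow labels defined in the enclosing conversion function when it does not. */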
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
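/* Usage sketch (added; hypothetical caller, mirroring what Cython-generated
   module code emits around this helper). A plain -1 return is ambiguous, so
   the error check must also consult PyErr_Occurred():

       int n = __Pyx_PyInt_As_int(py_obj);
       if (n == -1 && PyErr_Occurred()) {
           // conversion failed: an OverflowError or TypeError is set
       }
*/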
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
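/* Note (added): the check below compares single characters of "major.minor"
   (ctversion[0] and ctversion[2]), so only the first digit of the minor
   version participates; the 4-byte buffers truncate anything past "X.Y". */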
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
struct _ThresholdMap
{
char
*map_id,
*description;
size_t
width,
height;
ssize_t
divisor,
*levels;
};
/*
Static declarations.
*/
static const char
*MinimalThresholdMap =
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
threshold_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
register const Quantum
*restrict p,
*restrict pixels;
register Quantum
*restrict q;
register ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if (((threshold_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(image,p) == 0))
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if (((threshold_traits & CopyPixelTrait) != 0) ||
(GetPixelReadMask(image,p) == 0))
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=(image->columns-1)*GetPixelChannels(image);
}
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
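/*
  Usage sketch (added; hypothetical caller -- assumes `image` was obtained
  elsewhere, e.g. via ReadImage(), and `exception` via AcquireExceptionInfo();
  the 5x5 neighborhood and zero bias are arbitrary illustrative choices):

      Image *thresholded = AdaptiveThresholdImage(image, 5, 5, 0.0, exception);
      if (thresholded == (Image *) NULL)
        { } // inspect `exception` for the failure reason
*/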
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely, each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than the given value is set to its maximum, QuantumRange.
%
% This function is used to implement the "-threshold" operator of the
% command-line API.
%
% If the default channel setting is given, the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as this operator by using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
/*
Bilevel threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_BilevelImage)
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
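/*
  Usage sketch (added; hypothetical caller). Thresholding at 50% of the
  quantum range reduces `image` to a high-contrast image in place:

      if (BilevelImage(image, 0.5*QuantumRange, exception) == MagickFalse)
        { } // inspect `exception`
*/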
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels at or
% below the threshold into black while leaving all pixels above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o thresholds: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
Black threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel <= GetPixelInfoChannel(&threshold,channel))
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_BlackThresholdImage)
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
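/*
  Usage sketch (added; hypothetical caller). The threshold string is parsed
  with ParseGeometry(), so percent forms are accepted; this forces every
  updated channel value at or below 20% of QuantumRange to black:

      (void) BlackThresholdImage(image, "20%", exception);
*/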
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and each
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535); otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ClampPixel(const MagickRealType value)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
return((Quantum) value);
#else
if (value < 0.0f)
return(0.0);
if (value >= (MagickRealType) QuantumRange)
return((Quantum) QuantumRange);
return(value);
#endif
}
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel(q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ClampImage)
#endif
proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
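/*
  Usage sketch (added; hypothetical caller). Clamping matters mainly for HDRI
  builds, where channel values may drift outside [0, QuantumRange]; without
  HDRI support ClampPixel() reduces to a plain cast:

      (void) ClampImage(image, exception);
*/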
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
% ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the ThresholdMap to destroy.
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
assert(map != (ThresholdMap *) NULL);
if (map->map_id != (char *) NULL)
map->map_id=DestroyString(map->map_id);
if (map->description != (char *) NULL)
map->description=DestroyString(map->description);
if (map->levels != (ssize_t *) NULL)
map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
map=(ThresholdMap *) RelinquishMagickMemory(map);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
ExceptionInfo *exception)
{
const StringInfo
*option;
LinkedListInfo
*options;
ThresholdMap
*map;
map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
if (map != (ThresholdMap *) NULL)
return(map);
options=GetConfigureOptions(ThresholdsFilename,exception);
option=(const StringInfo *) GetNextValueInLinkedList(options);
while (option != (const StringInfo *) NULL)
{
map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),map_id,exception);
if (map != (ThresholdMap *) NULL)
break;
option=(const StringInfo *) GetNextValueInLinkedList(options);
}
options=DestroyConfigureOptions(options);
return(map);
}
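/*
  Usage sketch (added; hypothetical caller). "checks" is one of the built-in
  maps defined in MinimalThresholdMap above:

      ThresholdMap *map = GetThresholdMap("checks", exception);
      if (map != (ThresholdMap *) NULL)
        map = DestroyThresholdMap(map);
*/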
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the GetThresholdMapFile method is:
%
% ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
const char *map_id,ExceptionInfo *exception)
{
char
*p;
const char
*attribute,
*content;
double
value;
register ssize_t
i;
ThresholdMap
*map;
XMLTreeInfo
*description,
*levels,
*threshold,
*thresholds;
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
map=(ThresholdMap *) NULL;
thresholds=NewXMLTree(xml,exception);
if (thresholds == (XMLTreeInfo *) NULL)
return(map);
for (threshold=GetXMLTreeChild(thresholds,"threshold");
threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
attribute=GetXMLTreeAttribute(threshold,"map");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(threshold,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
}
if (threshold == (XMLTreeInfo *) NULL)
{
thresholds=DestroyXMLTree(thresholds);
return(map);
}
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
levels=GetXMLTreeChild(threshold,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<levels>, map \"%s\"", map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
if (map == (ThresholdMap *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
map->map_id=(char *) NULL;
map->description=(char *) NULL;
map->levels=(ssize_t *) NULL;
attribute=GetXMLTreeAttribute(threshold,"map");
if (attribute != (char *) NULL)
map->map_id=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
map->description=ConstantString(content);
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->width=StringToUnsignedLong(attribute);
if (map->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->height=StringToUnsignedLong(attribute);
if (map->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->divisor=(ssize_t) StringToLong(attribute);
if (map->divisor < 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<levels>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
sizeof(*map->levels));
if (map->levels == (ssize_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
for (i=0; i < (ssize_t) (map->width*map->height); i++)
{
map->levels[i]=(ssize_t) strtol(content,&p,10);
if (p == content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
(double) map->levels[i],map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=p;
}
value=(double) strtol(content,&p,10);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
thresholds=DestroyXMLTree(thresholds);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
% MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
const char *filename,ExceptionInfo *exception)
{
const char
*alias,
*content,
*map;
XMLTreeInfo
*description,
*threshold,
*thresholds;
assert( xml != (char *)NULL );
assert( file != (FILE *)NULL );
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
thresholds=NewXMLTree(xml,exception);
if ( thresholds == (XMLTreeInfo *)NULL )
return(MagickFalse);
(void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
(void) FormatLocaleFile(file,
"----------------------------------------------------\n");
threshold=GetXMLTreeChild(thresholds,"threshold");
for ( ; threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
map=GetXMLTreeAttribute(threshold,"map");
if (map == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<map>");
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
alias=GetXMLTreeAttribute(threshold,"alias");
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
content=GetXMLTreeContent(description);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<description>, map \"%s\"", map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
(void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
content);
}
thresholds=DestroyXMLTree(thresholds);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
ExceptionInfo *exception)
{
const StringInfo
*option;
LinkedListInfo
*options;
MagickStatusType
status;
status=MagickTrue;
if (file == (FILE *) NULL)
file=stdout;
options=GetConfigureOptions(ThresholdsFilename,exception);
(void) FormatLocaleFile(file,
"\n Threshold Maps for Ordered Dither Operations\n");
option=(const StringInfo *) GetNextValueInLinkedList(options);
while (option != (const StringInfo *) NULL)
{
(void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),exception);
option=(const StringInfo *) GetNextValueInLinkedList(options);
}
options=DestroyConfigureOptions(options);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedPosterizeImage() performs an ordered dither based on a number of
% pre-defined dithering threshold maps, but over multiple intensity levels,
% which can be different for different channels, according to the input
% argument.
%
% The format of the OrderedPosterizeImage method is:
%
% MagickBooleanType OrderedPosterizeImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level, while "checker,8,8,4" will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with a
% bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedPosterizeImage(Image *image,
const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
char
token[MaxTextExtent];
const char
*p;
double
levels[CompositePixelChannel];
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
ThresholdMap
*map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (threshold_map == (const char *) NULL)
return(MagickTrue);
p=(char *) threshold_map;
while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
(*p != '\0'))
p++;
threshold_map=p;
while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
(*p != '\0'))
{
if ((p-threshold_map) >= (MaxTextExtent-1))
break;
token[p-threshold_map]=(*p);
p++;
}
token[p-threshold_map]='\0';
map=GetThresholdMap(token,exception);
if (map == (ThresholdMap *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
return(MagickFalse);
}
for (i=0; i < MaxPixelChannels; i++)
levels[i]=2.0;
p=strchr((char *) threshold_map,',');
if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
{
GetMagickToken(p,&p,token);
if (*token == ',')
GetMagickToken(p,&p,token);
levels[i]=StringToDouble(token,(char **) NULL);
}
for (i=0; i < MaxPixelChannels; i++)
if (fabs(levels[i]) >= 1)
levels[i]-=1.0;
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
ssize_t
n;
n=0;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ssize_t
level,
threshold;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (fabs(levels[n]) < MagickEpsilon)
  {
    n++;
    continue;
  }
threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
level=threshold/(map->divisor-1);
threshold-=level*(map->divisor-1);
q[i]=ClampToQuantum((double) (level+(threshold >=
  map->levels[(x % map->width)+map->width*(y % map->height)]))*
  QuantumRange/levels[n]);
n++;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_OrderedPosterizeImage)
#endif
proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
map=DestroyThresholdMap(map);
return(status);
}
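/*
  Illustrative usage sketch (not part of the original source): posterize an
  image to 6 levels per channel with the ordered 3x3 dither map described in
  the documentation above.
*/
#if 0
static MagickBooleanType ExampleOrderedPosterize(Image *image,
  ExceptionInfo *exception)
{
  return(OrderedPosterizeImage(image,"o3x3,6",exception));
}
#endif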
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() sets each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer); otherwise the pixel value
% remains unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
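/*
  For example, with epsilon=1.0e-9 a quantum of 3.0e-12 is raised to 1.0e-9,
  a quantum of -3.0e-12 is lowered to -1.0e-9 (in HDRI builds where Quantum
  can be negative), and a quantum of 0.5 is returned unchanged.
*/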
MagickExport MagickBooleanType PerceptibleImage(Image *image,
const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
epsilon);
q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
epsilon);
q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
epsilon);
q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
epsilon);
q++;
}
return(SyncImage(image,exception));
}
/*
Perceptible image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=PerceptibleThreshold(q[i],epsilon);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PerceptibleImage)
#endif
proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two-color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o thresholds: a geometry string containing low,high thresholds. If the
% string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
% is performed instead.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
double
min_threshold,
max_threshold;
GeometryInfo
geometry_info;
MagickStatusType
flags;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
RandomInfo
**restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&threshold);
min_threshold=0.0;
max_threshold=(double) QuantumRange;
flags=ParseGeometry(thresholds,&geometry_info);
min_threshold=geometry_info.rho;
max_threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
max_threshold=min_threshold;
if (strchr(thresholds,'%') != (char *) NULL)
{
max_threshold*=(double) (0.01*QuantumRange);
min_threshold*=(double) (0.01*QuantumRange);
}
/*
Random threshold image.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#endif
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
threshold;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] < min_threshold)
threshold=min_threshold;
else
if ((double) q[i] > max_threshold)
threshold=max_threshold;
else
threshold=(double) (QuantumRange*
GetPseudoRandomValue(random_info[id]));
q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RandomThresholdImage)
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
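/*
  Illustrative usage sketch (not part of the original source): threshold each
  channel against a random value drawn between 40% and 60% of QuantumRange.
  The "low,high" argument form follows the documentation above; the exact
  separator accepted by ParseGeometry is an assumption here.
*/
#if 0
static MagickBooleanType ExampleRandomThreshold(Image *image,
  ExceptionInfo *exception)
{
  return(RandomThresholdImage(image,"40%,60%",exception));
}
#endif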
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o thresholds: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
White threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel > GetPixelInfoChannel(&threshold,channel))
q[i]=QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
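/*
  Illustrative usage sketch (not part of the original source): force all
  pixels above 50% intensity to white.
*/
#if 0
static MagickBooleanType ExampleWhiteThreshold(Image *image,
  ExceptionInfo *exception)
{
  return(WhiteThresholdImage(image,"50%",exception));
}
#endif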
|
ejercicio3.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(void){
int n=9, i,a,b[n];
for(i=0;i<n;i++) b[i]=-1;
#pragma omp parallel
{
#pragma omp single
{
printf("Introduce valor de inicializacion a:");
scanf("%d",&a);
printf("Single ejecutada por el thread%d\n",omp_get_thread_num());
}
#pragma omp for
for(i=0;i<n;i++) b[i]=a;
#pragma omp master
{
for(i=0;i<n;i++){
printf("Master ejecutada por el thread%d\n",omp_get_thread_num());
printf("b[%d]=%d\t",i,b[i]);
}
}
printf("\n");
}
return 0;
} |
GB_binop__isne_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint32)
// A*D function (colscale): GB (_AxD__isne_uint32)
// D*A function (rowscale): GB (_DxB__isne_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint32)
// C=scalar+B GB (_bind1st__isne_uint32)
// C=scalar+B' GB (_bind1st_tran__isne_uint32)
// C=A+scalar GB (_bind2nd__isne_uint32)
// C=A'+scalar GB (_bind2nd_tran__isne_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hypre_hopscotch_hash.h | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/**
* Hopscotch hash is modified from the code downloaded from
* https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing
* with the following terms of usage
*/
////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//------------------------------------------------------------------------------
//
// Permission to use, copy, modify and distribute this software and
// its documentation for any purpose is hereby granted without fee,
// provided that due acknowledgments to the authors are provided and
// this permission notice appears in all copies of the software.
// The software is provided "as is". There is no warranty of any kind.
//
//Authors:
// Maurice Herlihy
// Brown University
// and
// Nir Shavit
// Tel-Aviv University
// and
// Moran Tzafrir
// Tel-Aviv University
//
// Date: July 15, 2008.
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
// Modified : Jongsoo Park (jongsoo.park@intel.com)
// Oct 1, 2015.
//
////////////////////////////////////////////////////////////////////////////////
#ifndef hypre_HOPSCOTCH_HASH_HEADER
#define hypre_HOPSCOTCH_HASH_HEADER
#include <stdio.h>
#include <limits.h>
#include <assert.h>
#include <math.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#include "_hypre_utilities.h"
// Potentially architecture specific features used here:
// __builtin_ffs
// __sync_val_compare_and_swap
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
* This next section of code is here instead of in _hypre_utilities.h to get
* around some portability issues with Visual Studio. By putting it here, we
* can explicitly include this '.h' file in a few files in hypre and compile
* them with C++ instead of C (VS does not support C99 'inline').
******************************************************************************/
#ifdef HYPRE_USING_ATOMIC
static inline HYPRE_Int hypre_compare_and_swap(HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval)
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
return __sync_val_compare_and_swap(ptr, oldval, newval);
//#elif defined _MSC_VER
//return _InterlockedCompareExchange((long *)ptr, newval, oldval);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//atomic_compare_exchange_strong(atomic_ptr, &oldval, newval);
//return oldval;
#endif
}
static inline HYPRE_Int hypre_fetch_and_add(HYPRE_Int *ptr, HYPRE_Int value)
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
return __sync_fetch_and_add(ptr, value);
//#elif defined _MSC_VER
//return _InterlockedExchangeAdd((long *)ptr, value);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//return atomic_fetch_add(atomic_ptr, value);
#endif
}
#else // !HYPRE_USING_ATOMIC
static inline HYPRE_Int hypre_compare_and_swap(HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval)
{
if (*ptr == oldval)
{
*ptr = newval;
return oldval;
}
else return *ptr;
}
static inline HYPRE_Int hypre_fetch_and_add(HYPRE_Int *ptr, HYPRE_Int value)
{
HYPRE_Int oldval = *ptr;
*ptr += value;
return oldval;
}
#endif // !HYPRE_USING_ATOMIC
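/*
 * Illustrative sketch (not part of the original header): these wrappers are
 * used below to atomically claim a hash bucket or advance a shared counter,
 * e.g.
 *
 *   HYPRE_Int ticket = hypre_fetch_and_add(&counter, 1);
 *   if (oldval == hypre_compare_and_swap(&slot, oldval, newval))
 *     { the calling thread won the race }
 *
 * Without HYPRE_USING_ATOMIC they degrade to the sequential fallbacks above
 * and are only safe single-threaded.
 */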
/******************************************************************************/
// Constants ................................................................
#define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32)
#define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024)
#define HYPRE_HOPSCOTCH_HASH_EMPTY (0)
#define HYPRE_HOPSCOTCH_HASH_BUSY (1)
// Small Utilities ..........................................................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static inline HYPRE_Int first_lsb_bit_indx(hypre_uint x)
{
if (0 == x) return -1;
return __builtin_ffs(x) - 1;
}
#endif
/**
* hypre_Hash is adapted from xxHash with the following license.
*/
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/***************************************
* Constants
***************************************/
#define HYPRE_XXH_PRIME32_1 2654435761U
#define HYPRE_XXH_PRIME32_2 2246822519U
#define HYPRE_XXH_PRIME32_3 3266489917U
#define HYPRE_XXH_PRIME32_4 668265263U
#define HYPRE_XXH_PRIME32_5 374761393U
#define HYPRE_XXH_PRIME64_1 11400714785074694791ULL
#define HYPRE_XXH_PRIME64_2 14029467366897019727ULL
#define HYPRE_XXH_PRIME64_3 1609587929392839161ULL
#define HYPRE_XXH_PRIME64_4 9650029242287828579ULL
#define HYPRE_XXH_PRIME64_5 2870177450012600261ULL
# define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
# define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#ifdef HYPRE_BIGINT
static inline HYPRE_Int hypre_Hash(HYPRE_Int input)
{
hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);
hypre_ulongint k1 = input;
k1 *= HYPRE_XXH_PRIME64_2;
k1 = HYPRE_XXH_rotl64(k1, 31);
k1 *= HYPRE_XXH_PRIME64_1;
h64 ^= k1;
h64 = HYPRE_XXH_rotl64(h64, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;
h64 ^= h64 >> 33;
h64 *= HYPRE_XXH_PRIME64_2;
h64 ^= h64 >> 29;
h64 *= HYPRE_XXH_PRIME64_3;
h64 ^= h64 >> 32;
#ifndef NDEBUG
if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) {
hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
}
#endif
return h64;
}
#else
static inline HYPRE_Int hypre_Hash(HYPRE_Int input)
{
hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);
// 1665863975 is added to input so that
// only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
// Hence, we're fine as long as key is non-negative.
h32 += (input + 1665863975)*HYPRE_XXH_PRIME32_3;
h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4;
h32 ^= h32 >> 15;
h32 *= HYPRE_XXH_PRIME32_2;
h32 ^= h32 >> 13;
h32 *= HYPRE_XXH_PRIME32_3;
h32 ^= h32 >> 16;
//assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);
return h32;
}
#endif
static inline void hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* start_seg,
#endif
HYPRE_Int *free_bucket,
HYPRE_Int *free_dist )
{
HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
HYPRE_Int move_free_dist;
for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
{
hypre_uint start_hop_info = s->hopInfo[move_bucket];
HYPRE_Int move_new_free_dist = -1;
hypre_uint mask = 1;
HYPRE_Int i;
for (i = 0; i < move_free_dist; ++i, mask <<= 1)
{
if (mask & start_hop_info)
{
move_new_free_dist = i;
break;
}
}
if (-1 != move_new_free_dist)
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
if(start_seg != move_segment)
omp_set_lock(&move_segment->lock);
#endif
if (start_hop_info == s->hopInfo[move_bucket])
{
// new_free_bucket -> free_bucket and empty new_free_bucket
HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
s->key[*free_bucket] = s->key[new_free_bucket];
s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
++move_segment->timestamp;
#pragma omp flush
#endif
s->hopInfo[move_bucket] |= (1U << move_free_dist);
s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);
*free_bucket = new_free_bucket;
*free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
return;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
}
++move_bucket;
}
*free_bucket = -1;
*free_dist = 0;
}
static inline void hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* start_seg,
#endif
hypre_HopscotchBucket** free_bucket,
HYPRE_Int* free_dist)
{
hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
HYPRE_Int move_free_dist;
for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
{
hypre_uint start_hop_info = move_bucket->hopInfo;
HYPRE_Int move_new_free_dist = -1;
hypre_uint mask = 1;
HYPRE_Int i;
for (i = 0; i < move_free_dist; ++i, mask <<= 1)
{
if (mask & start_hop_info)
{
move_new_free_dist = i;
break;
}
}
if (-1 != move_new_free_dist)
{
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
if (start_seg != move_segment)
omp_set_lock(&move_segment->lock);
#endif
if (start_hop_info == move_bucket->hopInfo)
{
// new_free_bucket -> free_bucket and empty new_free_bucket
hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
(*free_bucket)->data = new_free_bucket->data;
(*free_bucket)->key = new_free_bucket->key;
(*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
++move_segment->timestamp;
#pragma omp flush
#endif
move_bucket->hopInfo |= (1U << move_free_dist);
move_bucket->hopInfo &= ~(1U << move_new_free_dist);
*free_bucket = new_free_bucket;
*free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
return;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if(start_seg != move_segment)
omp_unset_lock(&move_segment->lock);
#endif
}
++move_bucket;
}
*free_bucket = NULL;
*free_dist = 0;
}
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m,
HYPRE_Int inCapacity,
HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s );
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m );
// Query Operations .........................................................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static inline HYPRE_Int hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s,
HYPRE_Int key )
{
//CALCULATE HASH ..........................
HYPRE_Int hash = hypre_Hash(key);
//CHECK IF ALREADY CONTAIN ................
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
HYPRE_Int bucket = hash & s->bucketMask;
hypre_uint hopInfo = s->hopInfo[bucket];
if (0 == hopInfo)
return 0;
else if (1 == hopInfo )
{
if (hash == s->hash[bucket] && key == s->key[bucket])
return 1;
else return 0;
}
HYPRE_Int startTimestamp = segment->timestamp;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if (hash == s->hash[currElm] && key == s->key[currElm])
return 1;
hopInfo &= ~(1U << i);
}
if (segment->timestamp == startTimestamp)
return 0;
HYPRE_Int i;
for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i)
{
if (hash == s->hash[bucket + i] && key == s->key[bucket + i])
return 1;
}
return 0;
}
/**
* @ret -1 if key doesn't exist
*/
static inline HYPRE_Int hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m,
HYPRE_Int key)
{
//CALCULATE HASH ..........................
HYPRE_Int hash = hypre_Hash(key);
//CHECK IF ALREADY CONTAIN ................
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]);
hypre_uint hopInfo = elmAry->hopInfo;
if (0 == hopInfo)
return -1;
else if (1 == hopInfo )
{
if (hash == elmAry->hash && key == elmAry->key)
return elmAry->data;
else return -1;
}
HYPRE_Int startTimestamp = segment->timestamp;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_HopscotchBucket* currElm = elmAry + i;
if (hash == currElm->hash && key == currElm->key)
return currElm->data;
hopInfo &= ~(1U << i);
}
if (segment->timestamp == startTimestamp)
return -1;
hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]);
HYPRE_Int i;
for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket)
{
if (hash == currBucket->hash && key == currBucket->key)
return currBucket->data;
}
return -1;
}
#endif
//status Operations .........................................................
static inline HYPRE_Int hypre_UnorderedIntSetSize(hypre_UnorderedIntSet *s)
{
HYPRE_Int counter = 0;
HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int i;
for (i = 0; i < n; ++i)
{
if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i])
{
++counter;
}
}
return counter;
}
static inline HYPRE_Int hypre_UnorderedIntMapSize(hypre_UnorderedIntMap *m)
{
HYPRE_Int counter = 0;
HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE;
HYPRE_Int i;
for (i = 0; i < n; ++i)
{
if( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash )
{
++counter;
}
}
return counter;
}
HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len );
//modification Operations ...................................................
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static inline void hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s,
HYPRE_Int key )
{
//CALCULATE HASH ..........................
HYPRE_Int hash = hypre_Hash(key);
//LOCK KEY HASH ENTRY ....................
hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask];
omp_set_lock(&segment->lock);
HYPRE_Int bucket = hash&s->bucketMask;
//CHECK IF ALREADY CONTAIN ................
hypre_uint hopInfo = s->hopInfo[bucket];
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
HYPRE_Int currElm = bucket + i;
if(hash == s->hash[currElm] && key == s->key[currElm])
{
omp_unset_lock(&segment->lock);
return;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
HYPRE_Int free_bucket = bucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket], (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
break;
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
s->key[free_bucket] = key;
s->hash[free_bucket] = hash;
s->hopInfo[bucket] |= 1U << free_dist;
omp_unset_lock(&segment->lock);
return;
}
hypre_UnorderedIntSetFindCloserFreeBucket(s,
segment,
&free_bucket, &free_dist);
} while (-1 != free_bucket);
}
//NEED TO RESIZE ..........................
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return;
}
static inline HYPRE_Int hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m, HYPRE_Int key, HYPRE_Int data)
{
//CALCULATE HASH ..........................
HYPRE_Int hash = hypre_Hash(key);
//LOCK KEY HASH ENTRY ....................
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask];
omp_set_lock(&segment->lock);
hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]);
//CHECK IF ALREADY CONTAIN ................
hypre_uint hopInfo = startBucket->hopInfo;
while (0 != hopInfo)
{
HYPRE_Int i = first_lsb_bit_indx(hopInfo);
hypre_HopscotchBucket* currElm = startBucket + i;
if (hash == currElm->hash && key == currElm->key)
{
HYPRE_Int rc = currElm->data;
omp_unset_lock(&segment->lock);
return rc;
}
hopInfo &= ~(1U << i);
}
//LOOK FOR FREE BUCKET ....................
hypre_HopscotchBucket* free_bucket = startBucket;
HYPRE_Int free_dist = 0;
for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket)
{
    if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) )
break;
}
//PLACE THE NEW KEY .......................
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE)
{
do
{
if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE)
{
free_bucket->data = data;
free_bucket->key = key;
free_bucket->hash = hash;
startBucket->hopInfo |= 1U << free_dist;
omp_unset_lock(&segment->lock);
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
hypre_UnorderedIntMapFindCloserFreeBucket(m,
segment,
&free_bucket, &free_dist);
} while (NULL != free_bucket);
}
//NEED TO RESIZE ..........................
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n");
/*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/
exit(1);
return HYPRE_HOPSCOTCH_HASH_EMPTY;
}
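/*
 * Illustrative use (a sketch): PutIfAbsent returns HYPRE_HOPSCOTCH_HASH_EMPTY
 * when the key was newly inserted and the existing value otherwise, so a
 * sequential caller can assign dense ids to keys on first sight:
 *
 *   HYPRE_Int id = hypre_UnorderedIntMapPutIfAbsent(&m, key, next_id);
 *   if (HYPRE_HOPSCOTCH_HASH_EMPTY == id) id = next_id++;
 */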
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // hypre_HOPSCOTCH_HASH_HEADER
|
allAlgos.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#define INF 99999
#define min(X, Y) (((X) < (Y)) ? (X) : (Y))
//HELPER FUNCTIONS
void printdistances(int **dist, int n) {
  printf("\nShortest distances between vertices\n");
  for(int i = 0; i<n; i++) {
    for(int j=0; j< n; j++) {
      if(dist[i][j]==INF)
        printf("INF\t");
      else
        printf("%d\t", dist[i][j]);
    }
    printf("\n");
  }
}
void readFromArray(int **graph, int n) {
  FILE *myFile;
  myFile = fopen("graph.txt", "r");
  if (myFile == NULL) {
    fprintf(stderr, "could not open graph.txt\n");
    exit(1);
  }
  int i, j;
  for(i = 0; i < n; i++) {
    for(j = 0; j<n; j++) {
      if (fscanf(myFile, "%d, ", &graph[i][j]) != 1) {
        fprintf(stderr, "graph.txt is truncated or malformed\n");
        exit(1);
      }
    }
  }
  fclose(myFile);
}
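// graph.txt is assumed to hold an n x n adjacency matrix of comma-separated
// integers, with 0 meaning "no edge"; e.g. for n = 3:
//   0, 4, 0,
//   4, 0, 8,
//   0, 8, 0,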
//SHORTEST PATH ALGORITHMS
// Dijkstra's single-source shortest paths from startnode; the distances are
// computed but not printed, since the caller only uses this for timing.
void dijsk(int **graph, int n, int startnode) {
  int **cost, *distance, *pred;
  int *visited;
  int count, mindistance, nextnode, i, j;
  cost = (int **) malloc(n*sizeof(int*));
  for(int i = 0; i<n; i++)
    cost[i] = (int *) malloc(n*sizeof(int));
  distance = (int *) malloc(n*sizeof(int));
  pred = (int *) malloc(n*sizeof(int));
  visited = (int *) malloc(n*sizeof(int));
  for(i=0;i<n;i++)
    for(j=0;j<n;j++)
      if(graph[i][j]==0)
        cost[i][j]=INF;
      else
        cost[i][j]=graph[i][j];
  for(i=0;i<n;i++) {
    distance[i]=cost[startnode][i];
    pred[i]=startnode;
    visited[i]=0;
  }
  distance[startnode]=0;
  visited[startnode]=1;
  count=1;
  while(count<n) {
    mindistance=INF;
    nextnode=startnode; // stays put if all remaining nodes are unreachable
    //nextnode gives the unvisited node at minimum distance
    for(i=0;i<n;i++)
      if(distance[i]<mindistance&&!visited[i]) {
        mindistance=distance[i];
        nextnode=i;
      }
    //check if a better path exists through nextnode
    visited[nextnode]=1;
    for(i=0;i<n;i++)
      if(!visited[i])
        if(mindistance+cost[nextnode][i]<distance[i]) {
          distance[i]=mindistance+cost[nextnode][i];
          pred[i]=nextnode;
        }
    count++;
  }
  for(i = 0; i<n; i++)
    free(cost[i]);
  free(cost);
  free(distance);
  free(pred);
  free(visited);
}
void floydAlgo(int **graph, int n) {
int **distances, sum, i, j, k;
distances = (int **) malloc(n*sizeof(int*));
for(int i = 0; i<n; i++)
distances[i] = (int *) malloc(n*sizeof(int));
for(i = 0; i<n; i++)
for(j=0; j< n; j++)
distances[i][j] = graph[i][j];
for(k=0; k<n; k++) {
for(i = 0; i<n; i++) {
for(j=0; j < n; j++) {
if(i==j)
distances[i][j] = 0;
else {
sum = distances[i][k]+distances[k][j];
if(sum < distances[i][j]) {
distances[i][j] = sum;
}
}
}
}
}
  printdistances(distances, n);
  for(i = 0; i<n; i++)
    free(distances[i]);
  free(distances);
}
void parFloydAlgo(int **graph, int n) {
  int **distances;
  distances = (int **) malloc(n*sizeof(int*));
  for(int i = 0; i<n; i++)
    distances[i] = (int *) malloc(n*sizeof(int));
  for(int i = 0; i<n; i++)
    for(int j=0; j< n; j++)
      distances[i][j] = graph[i][j];
  // Decompose the matrix into a square grid of blocks, one block per thread.
  int sqrtOfNumberOfThreads = (int) sqrt(omp_get_max_threads());
  int blockSize = n / sqrtOfNumberOfThreads;
  int *kthRow = (int *) malloc(n*sizeof(int));
  int *kthColumn = (int *) malloc(n*sizeof(int));
  #pragma omp parallel num_threads(sqrtOfNumberOfThreads * sqrtOfNumberOfThreads)
  {
    // Per-thread bookkeeping must be private; the original shared loop
    // indices and block bounds were a data race.
    int threadNumber = omp_get_thread_num();
    int rowBlock = threadNumber / sqrtOfNumberOfThreads;
    int colBlock = threadNumber % sqrtOfNumberOfThreads;
    int initialRow = rowBlock * blockSize;
    int initialColumn = colBlock * blockSize;
    // The last block row/column absorbs any remainder of n.
    int lastRow = (rowBlock == sqrtOfNumberOfThreads - 1) ? n : initialRow + blockSize;
    int lastColumn = (colBlock == sqrtOfNumberOfThreads - 1) ? n : initialColumn + blockSize;
    for(int k = 0; k < n; k++) {
      // Snapshot row k and column k with a single writer; the implicit
      // barrier at the end of the single construct publishes the copy.
      #pragma omp single
      for(int i = 0; i < n; ++i) {
        kthRow[i] = distances[k][i];
        kthColumn[i] = distances[i][k];
      }
      for(int i = initialRow; i < lastRow; ++i) {
        int lengthFromItoK = kthColumn[i];
        for(int j = initialColumn; j < lastColumn; ++j) {
          if (i != j && i != k && j != k) {
            int lengthFromItoKAndFromKToJ = lengthFromItoK + kthRow[j];
            distances[i][j] = min(lengthFromItoKAndFromKToJ, distances[i][j]);
          }
        }
      }
      // All blocks must finish pass k before row/column k+1 is snapshotted.
      #pragma omp barrier
    }
  }
  printdistances(distances, n);
  for(int i = 0; i<n; i++)
    free(distances[i]);
  free(distances);
  free(kthRow);
  free(kthColumn);
}
int main (int argc, char *argv[]) {
  int n, **graph;
  clock_t t;
  double time_taken;
  if (argc < 2) {
    fprintf(stderr, "usage: %s <number of vertices>\n", argv[0]);
    return 1;
  }
  n = atoi(argv[1]);
//Allocate all memory
graph = (int **) malloc(n*sizeof(int*));
for(int i = 0; i<n; i++)
graph[i] = (int *) malloc(n*sizeof(int));
readFromArray(graph, n);
printf("\nFloyd's Algo");
clock_t begin = clock();
//floydAlgo(graph, n);
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\n\tSingle took: %f seconds\n", time_spent);
clock_t begin2 = clock();
//parFloydAlgo(graph, n);
clock_t end2 = clock();
double time_spent2 = (double)(end2 - begin2) / CLOCKS_PER_SEC;
printf("\n\tParallel took: %f seconds\n", time_spent2);
printf("\nDijkstra's Algo");
t = clock();
/*for(int i = 0; i < n; i++) {
dijsk(graph, n, i);
}*/
t = clock()-t;
time_taken = ((double)t)/CLOCKS_PER_SEC;
printf("\n\tSinlge took: %f seconds\n", time_taken);
  // Note: clock() accumulates CPU time across all threads, so it understates
  // parallel speedup; omp_get_wtime() would measure wall-clock time instead.
  t = clock();
  #pragma omp parallel for
  for(int i = 0; i < n; i++) {
    dijsk(graph, n, i);
  }
t = clock()-t;
time_taken = ((double)t)/CLOCKS_PER_SEC;
printf("\n\tParallel took: %f seconds\n", time_taken);
  for(int i = 0; i < n; i++)
    free(graph[i]);
  free(graph);
return 0;
}
|
halos.c | #include "../comms.h"
#include "../mesh.h"
#include "../umesh.h"
// Enforce reflective boundary conditions on the problem state
void handle_boundary_2d(const int nx, const int ny, Mesh* mesh, double* arr,
const int invert, const int pack) {
START_PROFILING(&comms_profile);
const int pad = mesh->pad;
int* neighbours = mesh->neighbours;
#ifdef MPI
int nmessages = 0;
if (pack) {
// Pack east and west
if (neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
mesh->east_buffer_out[(ii - pad) * pad + dd] =
arr[(ii * nx) + (nx - 2 * pad + dd)];
}
}
non_block_send(mesh->east_buffer_out, (ny - 2 * pad) * pad,
neighbours[EAST], 2, nmessages++);
non_block_recv(mesh->east_buffer_in, (ny - 2 * pad) * pad,
neighbours[EAST], 3, nmessages++);
}
if (neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
mesh->west_buffer_out[(ii - pad) * pad + dd] =
arr[(ii * nx) + (pad + dd)];
}
}
non_block_send(mesh->west_buffer_out, (ny - 2 * pad) * pad,
neighbours[WEST], 3, nmessages++);
non_block_recv(mesh->west_buffer_in, (ny - 2 * pad) * pad,
neighbours[WEST], 2, nmessages++);
}
// Pack north and south
if (neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
mesh->north_buffer_out[dd * (nx - 2 * pad) + (jj - pad)] =
arr[(ny - 2 * pad + dd) * nx + jj];
}
}
non_block_send(mesh->north_buffer_out, (nx - 2 * pad) * pad,
neighbours[NORTH], 1, nmessages++);
non_block_recv(mesh->north_buffer_in, (nx - 2 * pad) * pad,
neighbours[NORTH], 0, nmessages++);
}
if (neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
mesh->south_buffer_out[dd * (nx - 2 * pad) + (jj - pad)] =
arr[(pad + dd) * nx + jj];
}
}
non_block_send(mesh->south_buffer_out, (nx - 2 * pad) * pad,
neighbours[SOUTH], 0, nmessages++);
non_block_recv(mesh->south_buffer_in, (nx - 2 * pad) * pad,
neighbours[SOUTH], 1, nmessages++);
}
wait_on_messages(nmessages);
// Unpack east and west
if (neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + dd] = mesh->west_buffer_in[(ii - pad) * pad + dd];
}
}
}
if (neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + (nx - pad + dd)] =
mesh->east_buffer_in[(ii - pad) * pad + dd];
}
}
}
// Unpack north and south
if (neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[(ny - pad + dd) * nx + jj] =
mesh->north_buffer_in[dd * (nx - 2 * pad) + (jj - pad)];
}
}
}
if (neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[dd * nx + jj] =
mesh->south_buffer_in[dd * (nx - 2 * pad) + (jj - pad)];
}
}
}
}
#endif
// Perform the boundary reflections, potentially with the data updated from
// neighbours
double x_inversion_coeff = (invert == INVERT_X) ? -1.0 : 1.0;
double y_inversion_coeff = (invert == INVERT_Y) ? -1.0 : 1.0;
// Reflect at the north
if (neighbours[NORTH] == EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[(ny - pad + dd) * nx + jj] =
y_inversion_coeff * arr[(ny - 1 - pad - dd) * nx + jj];
}
}
}
  // Reflect at the south
if (neighbours[SOUTH] == EDGE) {
#pragma omp parallel for collapse(2)
for (int dd = 0; dd < pad; ++dd) {
for (int jj = pad; jj < nx - pad; ++jj) {
arr[(pad - 1 - dd) * nx + jj] =
y_inversion_coeff * arr[(pad + dd) * nx + jj];
}
}
}
  // Reflect at the east
if (neighbours[EAST] == EDGE) {
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + (nx - pad + dd)] =
x_inversion_coeff * arr[ii * nx + (nx - 1 - pad - dd)];
}
}
}
if (neighbours[WEST] == EDGE) {
    // Reflect at the west
#pragma omp parallel for collapse(2)
for (int ii = pad; ii < ny - pad; ++ii) {
for (int dd = 0; dd < pad; ++dd) {
arr[ii * nx + (pad - 1 - dd)] =
x_inversion_coeff * arr[ii * nx + (pad + dd)];
}
}
}
STOP_PROFILING(&comms_profile, __func__);
}
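// Example invocation (an illustrative sketch; the field name and flag values
// follow the conventions of the including application and are assumptions):
//   handle_boundary_2d(mesh->local_nx, mesh->local_ny, mesh,
//                      velocity_x, INVERT_X, 1 /* pack */);
// exchanges halos for velocity_x and flips the sign of its normal component
// across reflective east/west boundaries.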
// Enforce reflective boundary conditions on the problem state
void handle_boundary_3d(const int nx, const int ny, const int nz, Mesh* mesh,
double* arr, const int invert, const int pack) {
#if 0
START_PROFILING(&comms_profile);
int* neighbours = mesh->neighbours;
#ifdef MPI
int nmessages = 0;
if(pack) {
// Pack east and west
if(neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
mesh->east_buffer_out[(ii*ny*pad)+(jj*pad)+(dd)] =
arr[(ii*nx*ny)+(jj*nx)+(nx-2*pad+dd)];
}
}
}
non_block_send(
mesh->east_buffer_out, nz*ny*pad, neighbours[EAST], 2, nmessages++);
non_block_recv(
mesh->east_buffer_in, nz*ny*pad, neighbours[EAST], 3, nmessages++);
}
if(neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
mesh->west_buffer_out[(ii*ny*pad)+(jj*pad)+(dd)] =
arr[(ii*nx*ny)+(jj*nx)+(pad+dd)];
}
}
}
non_block_send(
mesh->west_buffer_out, nz*ny*pad, neighbours[WEST], 3, nmessages++);
non_block_recv(
mesh->west_buffer_in, nz*ny*pad, neighbours[WEST], 2, nmessages++);
}
// Pack north and south
if(neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
mesh->north_buffer_out[(ii*pad*nx)+(dd*nx)+(kk)] =
arr[(ii*nx*ny)+((ny-2*pad+dd)*nx)+(kk)];
}
}
}
non_block_send(
mesh->north_buffer_out, nz*nx*pad, neighbours[NORTH], 1, nmessages++);
non_block_recv(
mesh->north_buffer_in, nz*nx*pad, neighbours[NORTH], 0, nmessages++);
}
if(neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
mesh->south_buffer_out[(ii*pad*nx)+(dd*nx)+(kk)] =
arr[(ii*nx*ny)+((pad+dd)*nx)+(kk)];
}
}
}
non_block_send(
mesh->south_buffer_out, nz*nx*pad, neighbours[SOUTH], 0, nmessages++);
non_block_recv(
mesh->south_buffer_in, nz*nx*pad, neighbours[SOUTH], 1, nmessages++);
}
// Pack front and back
if(neighbours[FRONT] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
mesh->front_buffer_out[(dd*nx*ny)+(jj*nx)+(kk)] =
arr[((pad+dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
non_block_send(
mesh->front_buffer_out, nx*ny*pad, neighbours[FRONT], 4, nmessages++);
non_block_recv(
mesh->front_buffer_in, nx*ny*pad, neighbours[FRONT], 5, nmessages++);
}
if(neighbours[BACK] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
mesh->back_buffer_out[(dd*nx*ny)+(jj*nx)+(kk)] =
arr[((nz-2*pad+dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
non_block_send(
mesh->back_buffer_out, nx*ny*pad, neighbours[BACK], 5, nmessages++);
non_block_recv(
mesh->back_buffer_in, nx*ny*pad, neighbours[BACK], 4, nmessages++);
}
wait_on_messages(nmessages);
// Unpack east and west
if(neighbours[EAST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+(nx-pad+dd)] =
mesh->east_buffer_in[(ii*ny*pad)+(jj*pad)+(dd)];
}
}
}
}
if(neighbours[WEST] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+dd] =
mesh->west_buffer_in[(ii*ny*pad)+(jj*pad)+(dd)];
}
}
}
}
// Unpack north and south
if(neighbours[NORTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+((ny-pad+dd)*nx)+(kk)] =
mesh->north_buffer_in[(ii*pad*nx)+(dd*nx)+(kk)];
}
}
}
}
if(neighbours[SOUTH] != EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+(dd*nx)+(kk)] =
mesh->south_buffer_in[(ii*pad*nx)+(dd*nx)+(kk)];
}
}
}
}
// Unpack front and back
if(neighbours[FRONT] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[(dd*nx*ny)+(jj*nx)+(kk)] =
mesh->front_buffer_in[(dd*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
if(neighbours[BACK] != EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[((nz-pad+dd)*nx*ny)+(jj*nx)+(kk)] =
mesh->back_buffer_in[(dd*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
}
#endif
// Perform the boundary reflections, potentially with the data updated from neighbours
double x_inversion_coeff = (invert == INVERT_X) ? -1.0 : 1.0;
double y_inversion_coeff = (invert == INVERT_Y) ? -1.0 : 1.0;
double z_inversion_coeff = (invert == INVERT_Z) ? -1.0 : 1.0;
// Reflect at the east
if(neighbours[EAST] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+(nx-pad+dd)] =
x_inversion_coeff*arr[(ii*nx*ny)+(jj*nx)+(nx-1-pad-dd)];
}
}
}
}
// Reflect at the west
if(neighbours[WEST] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int jj = 0; jj < ny; ++jj) {
for(int dd = 0; dd < pad; ++dd) {
arr[(ii*nx*ny)+(jj*nx)+(pad-1-dd)] =
x_inversion_coeff*arr[(ii*nx*ny)+(jj*nx)+(pad+dd)];
}
}
}
}
// Reflect at north
if(neighbours[NORTH] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+((ny-pad+dd)*nx)+(kk)] =
y_inversion_coeff*arr[(ii*nx*ny)+((ny-1-pad-dd)*nx)+(kk)];
}
}
}
}
// Reflect at the south
if(neighbours[SOUTH] == EDGE) {
#pragma omp parallel for collapse(2)
for(int ii = 0; ii < nz; ++ii) {
for(int dd = 0; dd < pad; ++dd) {
for(int kk = 0; kk < nx; ++kk) {
arr[(ii*nx*ny)+((pad-1-dd)*nx)+(kk)] =
y_inversion_coeff*arr[(ii*nx*ny)+((pad+dd)*nx)+(kk)];
}
}
}
}
// Reflect at the front
if(neighbours[FRONT] == EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[((pad-1-dd)*nx*ny)+(jj*nx)+(kk)] =
z_inversion_coeff*arr[((pad+dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
// Reflect at the back
if(neighbours[BACK] == EDGE) {
#pragma omp parallel for collapse(2)
for(int dd = 0; dd < pad; ++dd) {
for(int jj = 0; jj < ny; ++jj) {
for(int kk = 0; kk < nx; ++kk) {
arr[((nz-pad+dd)*nx*ny)+(jj*nx)+(kk)] =
z_inversion_coeff*arr[((nz-1-pad-dd)*nx*ny)+(jj*nx)+(kk)];
}
}
}
}
STOP_PROFILING(&comms_profile, __func__);
#endif // if 0
}
// Reflect the node centered velocities on the boundary
void handle_unstructured_reflect(const int nnodes, const int* boundary_index,
const int* boundary_type,
const double* boundary_normal_x,
const double* boundary_normal_y,
double* velocity_x, double* velocity_y) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int index = boundary_index[(nn)];
if (index == IS_INTERIOR) {
continue;
}
if (boundary_type[(index)] == IS_BOUNDARY) {
// Project the velocity onto the face direction
const double boundary_parallel_x = boundary_normal_y[(index)];
const double boundary_parallel_y = -boundary_normal_x[(index)];
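      // boundary_parallel is the boundary normal rotated by 90 degrees, so
      // the projection below keeps only the tangential velocity component.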
const double vel_dot_parallel = (velocity_x[(nn)] * boundary_parallel_x +
velocity_y[(nn)] * boundary_parallel_y);
velocity_x[(nn)] = boundary_parallel_x * vel_dot_parallel;
velocity_y[(nn)] = boundary_parallel_y * vel_dot_parallel;
} else if (boundary_type[(index)] == IS_CORNER) {
velocity_x[(nn)] = 0.0;
velocity_y[(nn)] = 0.0;
}
}
}
// Reflect the node centered velocities on the boundary
void handle_unstructured_reflect_3d(const int nnodes, const int* boundary_index,
const int* boundary_type,
const double* boundary_normal_x,
const double* boundary_normal_y,
const double* boundary_normal_z,
double* velocity_x, double* velocity_y,
double* velocity_z) {
#pragma omp parallel for
for (int nn = 0; nn < nnodes; ++nn) {
const int index = boundary_index[(nn)];
if (index == IS_INTERIOR) {
continue;
}
if (boundary_type[(index)] == IS_EDGE) {
// The normal here isn't actually a normal but a projection vector
const double ab = (velocity_x[(nn)] * boundary_normal_x[(index)] +
velocity_y[(nn)] * boundary_normal_y[(index)] +
velocity_z[(nn)] * boundary_normal_z[(index)]);
// Project the vector onto the edge line
velocity_x[(nn)] = ab * boundary_normal_x[(index)];
velocity_y[(nn)] = ab * boundary_normal_y[(index)];
velocity_z[(nn)] = ab * boundary_normal_z[(index)];
} else if (boundary_type[(index)] == IS_BOUNDARY) {
// Perform an orthogonal projection, assuming normal vector is normalised
const double un = (velocity_x[(nn)] * boundary_normal_x[(index)] +
velocity_y[(nn)] * boundary_normal_y[(index)] +
velocity_z[(nn)] * boundary_normal_z[(index)]);
velocity_x[(nn)] -= un * boundary_normal_x[(index)];
velocity_y[(nn)] -= un * boundary_normal_y[(index)];
velocity_z[(nn)] -= un * boundary_normal_z[(index)];
} else if (boundary_type[(index)] == IS_CORNER) {
velocity_x[(nn)] = 0.0;
velocity_y[(nn)] = 0.0;
velocity_z[(nn)] = 0.0;
}
}
}
|
CALPHADTieLineConcSolverTernary.h | #ifndef included_CALPHADTieLineConcSolverTernary
#define included_CALPHADTieLineConcSolverTernary
#include "NewtonSolver.h"
#include "datatypes.h"
namespace Thermo4PFM
{
/// solve for equilibrium compositions along a tie line
/// passing through nominal composition
class CALPHADTieLineConcSolverTernary
: public NewtonSolver<5, CALPHADTieLineConcSolverTernary, JacobianDataType>
{
public:
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
/// input x: initial values for cL_0, cL_1, cS_0, cS_1
/// and phase fraction
/// output x: ceqL_0, ceqL_1, ceqS_0, ceqS_1, and phi
int ComputeConcentration(double* const x, const double tol,
const int max_iters, const double alpha = 1.)
{
return NewtonSolver::ComputeSolution(x, tol, max_iters, alpha);
}
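    /// Typical use (an illustrative sketch; the inputs are placeholders and
    /// follow the setup() declaration below):
    ///   CALPHADTieLineConcSolverTernary solver;
    ///   solver.setup(c0, c1, RTinv, L_AB_L, L_AC_L, L_BC_L,
    ///                L_AB_S, L_AC_S, L_BC_S, L_ABC_L, L_ABC_S, fA, fB, fC);
    ///   double x[5] = { cL0, cL1, cS0, cS1, 0.5 }; // concentrations + phase fraction
    ///   int status = solver.ComputeConcentration(x, 1.e-8, 50);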
    /// set up model parameter values to be used by the solver
    /// at a given temperature, including the nominal composition
    /// c0, c1
void setup(const double c0, const double c1, const double RTinv,
const CalphadDataType* const L_AB_L,
const CalphadDataType* const L_AC_L,
const CalphadDataType* const L_BC_L,
const CalphadDataType* const L_AB_S,
const CalphadDataType* const L_AC_S,
const CalphadDataType* const L_BC_S,
const CalphadDataType* const L_ABC_L,
const CalphadDataType* const L_ABC_S, const CalphadDataType* const fA,
const CalphadDataType* const fB, const CalphadDataType* const fC);
    /// evaluate the RHS of the system of equations to solve,
    /// specific to this solver
void RHS(const double* const x, double* const fvec);
/// evaluate Jacobian of system of equations
/// specific to this solver
void Jacobian(const double* const x, JacobianDataType** const fjac);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
private:
///
/// nominal composition (defining tie line)
///
double conc_[2];
double RTinv_;
double RT_;
///
/// L coefficients for 2 possible phases (L and S)
///
CalphadDataType L_AB_L_[4];
CalphadDataType L_AC_L_[4];
CalphadDataType L_BC_L_[4];
CalphadDataType L_ABC_L_[3];
CalphadDataType L_AB_S_[4];
CalphadDataType L_AC_S_[4];
CalphadDataType L_BC_S_[4];
CalphadDataType L_ABC_S_[3];
///
    /// energies of 3 species, in two phases each
///
CalphadDataType fA_[2];
CalphadDataType fB_[2];
CalphadDataType fC_[2];
};
}
#endif
|
diagmm_x_csr_u_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
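    // With a unit diagonal the CSR operand satisfies A * x == x, so the kernel
    // reduces to the dense elementwise update y = alpha * x + beta * y over
    // the rowC x colC block; the matrix values themselves are never read.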
ALPHA_INT rowC = mat->rows;
ALPHA_INT colC = columns;
ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT r = 0; r < rowC; ++r)
{
for (ALPHA_INT c = 0; c < colC; ++c)
{
alpha_mule(y[index2(r, c, ldy)], beta);
alpha_madde(y[index2(r, c, ldy)], alpha, x[index2(r, c, ldx)]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
fused_rowwise_nbit_conversion_ops.h | #pragma once
#include <algorithm>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
// for param_search_greedy
#include "caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h"
#include "caffe2/perfkernels/fused_nbit_rowwise_conversion.h"
namespace caffe2 {
template <
int BIT_RATE,
typename T,
void (*convert)(float* dst, const T* src, size_t N),
bool GREEDY = false>
class FloatToFusedNBitRowwiseQuantizedOp final : public Operator<CPUContext> {
public:
FloatToFusedNBitRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FloatToFusedNBitRowwiseQuantizedOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FLOAT);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
CAFFE_ENFORCE_EQ(
        input_columns % NUM_ELEM_PER_BYTE,
0,
"FloatToFused" + caffe2::to_string(BIT_RATE) +
"BitRowwiseQuantizedOp only works for the number of "
"columns a multiple of " +
caffe2::to_string(NUM_ELEM_PER_BYTE));
// The "fused" representation stores the scale and bias with the
// row-wise quantized data in one tensor.
// Since we represent the scale and bias in 16-bit float, we'll use the
// last 4 bytes of each row for scale (2 bytes) and bias (2 bytes).
// | ... quantized data ... | scale | bias |
// | number_of_columns | 2B | 2B |
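    // For example, with BIT_RATE = 4 (NUM_ELEM_PER_BYTE = 2) and 100 input
    // columns, each output row holds ceil(100 / 2) = 50 quantized bytes plus
    // 2 + 2 bytes of fp16 scale and bias, i.e. 54 bytes in total.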
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] = static_cast<std::int64_t>(
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
2 * sizeof(at::Half));
auto* output = Output(
DATA_FUSED_SCALE_BIAS, output_dimensions, at::dtype<std::uint8_t>());
const auto* input_data = input.template data<T>();
auto* output_data = output->template mutable_data<std::uint8_t>();
if (!GREEDY && std::is_same<T, float>::value) {
// fast path
CAFFE_ENFORCE(
reinterpret_cast<void (*)(float*, const float*, std::size_t)>(
convert) == internal::convertfp32fp32,
"When T == float, convert must be convertfp32fp32");
FloatToFusedNBitRowwiseQuantizedSBHalf(
BIT_RATE,
reinterpret_cast<const float*>(input_data),
input_rows,
input_columns,
output_data);
} else {
const auto output_columns = output->size(output->dim() - 1);
#ifdef _OPENMP
vector<float> tmp_vec(
input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
vector<float> tmp_vec(input_columns);
#endif
#pragma omp parallel for if (GREEDY)
for (int row = 0; row < input_rows; ++row) {
float* tmp = tmp_vec.data();
#ifdef _OPENMP
if (GREEDY) {
tmp = &tmp_vec[omp_get_thread_num() * input_columns];
}
#endif
convert(tmp, input_data + row * input_columns, input_columns);
std::uint8_t* output_row = output_data + row * output_columns;
at::Half* output_row_scale = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
at::Half* output_row_bias = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
float Xmin = *std::min_element(tmp, tmp + input_columns);
float Xmax = *std::max_element(tmp, tmp + input_columns);
if (GREEDY) {
internal::param_search_greedy(
tmp, input_columns, 200, 0.16, Xmin, Xmax, BIT_RATE);
}
// Round Xmin to fp16 to match with dequantization that will use fp16
// for Xmin.
Xmin = static_cast<at::Half>(Xmin);
const float range = Xmax - Xmin;
// Round scale to fp16 to match with dequantization that will use fp16
// for scale.
// Set scale to 1.0f for the corner case of Xmax == Xmin .
// Any non-zero scale would work because during quantization
// (X - Xmin) / scale will be 0 for all X unless scale is 0.
at::Half scale = range == 0 ? 1.0f : range / ((1 << BIT_RATE) - 1);
float inverse_scale = scale == 0 ? 1.0f : 1.0f / scale;
if (scale == 0 || std::isinf(inverse_scale)) {
// Corner case handling when Xmax == Xmin
// Any scale would work because X - Xmin will be 0 for all X
scale = 1.0f;
inverse_scale = 1.0f;
}
*output_row_scale = scale;
*output_row_bias = Xmin;
for (int col = 0; col < input_columns; ++col) {
float X = tmp[col];
std::uint8_t quantized = std::max(
0,
std::min<int>(
std::lrintf((X - Xmin) * inverse_scale),
(1 << BIT_RATE) - 1));
if (col % NUM_ELEM_PER_BYTE == 0) {
output_row[col / NUM_ELEM_PER_BYTE] = quantized;
} else {
output_row[col / NUM_ELEM_PER_BYTE] |=
(quantized << ((col % NUM_ELEM_PER_BYTE) * BIT_RATE));
}
}
}
} // GREEDY || !std::is_same<T, float>::value
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS);
};
template <
int BIT_RATE,
typename T,
void (*convert)(T* dst, const float* src, size_t N)>
class FusedNBitRowwiseQuantizedToFloatOp final : public Operator<CPUContext> {
public:
FusedNBitRowwiseQuantizedToFloatOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FusedNBitRowwiseQuantizedToFloatOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FUSED_SCALE_BIAS);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes per row are two fp16 scale and bias.
// The rest of input_columns is the number of values in the original row.
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] =
static_cast<std::int64_t>(input_columns - 2 * sizeof(at::Half)) *
NUM_ELEM_PER_BYTE;
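    // This inverts the layout used by the quantizing op above: e.g. 54 input
    // bytes with BIT_RATE = 4 expand to (54 - 4) * 2 = 100 output values.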
auto* output = Output(DATA_FLOAT, output_dimensions, at::dtype<T>());
const auto output_columns = output->size(output->dim() - 1);
const auto* input_data = input.template data<std::uint8_t>();
T* output_data = output->template mutable_data<T>();
if (std::is_same<T, float>::value) {
// fast path
CAFFE_ENFORCE(
reinterpret_cast<void (*)(float*, const float*, std::size_t)>(
convert) == internal::convertfp32fp32,
"When T == float, convert must be convertfp32fp32");
FusedNBitRowwiseQuantizedSBHalfToFloat(
BIT_RATE,
input_data,
input_rows,
input_columns,
reinterpret_cast<float*>(output_data));
} else {
std::vector<float> tmp(output_columns);
for (size_t row = 0; row < input_rows; ++row) {
const std::uint8_t* input_row = input_data + row * input_columns;
float scale = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
float bias = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
for (int col = 0; col < output_columns; ++col) {
std::uint8_t quantized = input_row[col / NUM_ELEM_PER_BYTE];
quantized >>= (col % NUM_ELEM_PER_BYTE) * BIT_RATE;
quantized &= (1 << BIT_RATE) - 1;
tmp[col] = scale * quantized + bias;
}
convert(output_data + row * output_columns, tmp.data(), output_columns);
}
}
return true;
}
private:
INPUT_TAGS(DATA_FUSED_SCALE_BIAS);
OUTPUT_TAGS(DATA_FLOAT);
};
} // namespace caffe2
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
void finalize(Function *Fn = nullptr);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
  // This is created primarily for the sections construct, as llvm::function_ref
  // (BodyGenCallbackTy) is not storable: as described in the comments of the
  // function_ref class, function_ref contains a non-owning reference to the
  // callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
///   represented, e.g. using uint8_t, the loop below has a trip count of 256,
///   which cannot be stored in an 8-bit integer:
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
///   theoretical trip count of the collapsed loop would be 2^32 iterations,
///   which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
///         body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
/// are referred to as the floor loops, and the loops i2 and j2 as the tile
/// loops. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. they should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimensions.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// but which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Add metadata to simd-ize a loop.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction, as well as
/// the element type of these pointers. They are expected to atomically
/// update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
ReductionGenTy ReductionGen,
AtomicReductionGenTy AtomicReductionGen)
: ElementType(ElementType), Variable(Variable),
PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
AtomicReductionGen(AtomicReductionGen) {
assert(cast<PointerType>(Variable->getType())
->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
}
/// Reduction element type, must match pointee type of variable.
Type *ElementType;
/// Reduction variable of pointer type.
Value *Variable;
/// Thread-private partial reduction variable.
Value *PrivateVariable;
/// Callback for generating the reduction body. The IR produced by this will
/// be used to combine two values in a thread-safe context, e.g., under
/// lock or within the same thread, and therefore need not be atomic.
ReductionGenTy ReductionGen;
/// Callback for generating the atomic reduction body, may be null. The IR
/// produced by this will be used to atomically combine two values during
/// reduction. If null, the implementation will use the non-atomic version
/// along with the appropriate synchronization mechanisms.
AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
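///
/// As an illustration only (a sketch, not part of the interface contract), a
/// caller could describe and emit a simple '+' reduction over a float roughly
/// as follows; `OMPBuilder`, `Ctx`, `Var`, `PrivVar`, `Loc` and `AllocaIP`
/// are hypothetical names for an initialized OpenMPIRBuilder, the LLVM
/// context, the reduction variable, its privatized copy, and suitable
/// locations:
///
/// \code
/// auto SumGen = [](InsertPointTy IP, Value *LHS, Value *RHS,
///                  Value *&Result) {
///   IRBuilder<> Tmp(IP.getBlock(), IP.getPoint());
///   Result = Tmp.CreateFAdd(LHS, RHS, "red.add"); // combine two partials
///   return Tmp.saveIP();
/// };
/// ReductionInfo RI(Type::getFloatTy(Ctx), Var, PrivVar, SumGen,
///                  /*AtomicReductionGen=*/nullptr);
/// InsertPointTy AfterIP = OMPBuilder.createReductions(Loc, AllocaIP, {RI});
/// \endcode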
///}
/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column,
uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
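///
/// A typical pairing with getOrCreateSrcLocStr, sketched with a hypothetical
/// LocationDescription `Loc`:
///
/// \code
/// uint32_t SrcLocStrSize;
/// Constant *SrcLocStr = getOrCreateSrcLocStr(Loc, SrcLocStrSize);
/// Constant *Ident = getOrCreateIdent(SrcLocStr, SrcLocStrSize);
/// \endcode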
/// Create a hidden global flag \p Name in the module with initial value \p
/// Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that also reference the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
return !FinalizationStack.empty() &&
FinalizationStack.back().IsCancellable &&
FinalizationStack.back().DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
BasicBlock *EntryBB, *ExitBB;
SmallVector<Value *, 2> ExcludeArgsFromAggregate;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// freed.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
struct MapperAllocas {
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the alloca instructions used in calls to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn Function to call for copying data.
/// \param DidIt Flag variable; 1 for the 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
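///
/// A minimal call sketch with hypothetical names; the exact callback
/// parameter types are the BodyGenCallbackTy and FinalizeCallbackTy declared
/// earlier in this header:
///
/// \code
/// auto BodyGenCB = [&](auto AllocaIP, auto CodeGenPos) {
///   // Emit the master region body here.
/// };
/// auto FiniCB = [&](InsertPointTy CodeGenIP) {
///   // Emit finalization, e.g., variable copy-out, here.
/// };
/// Builder.restoreIP(createMaster(Loc, BodyGenCB, FiniCB));
/// \endcode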
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName Name of the lock used by the critical directive.
/// \param HintInst Hint instruction for the hint clause associated with
/// the critical directive.
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The values to be stored in the dependence vector.
/// \param Name The name of the alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, the construct has a threads clause or no
/// clause; otherwise, it has a simd clause.
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, the barrier that ensures all sections have
/// executed before moving forward is not generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate a conditional branch and the relevant BasicBlocks through which
/// private threads copy the 'copyin' variables from the master copy to their
/// threadprivate copies.
///
/// \param IP Insertion block for the copyin conditional.
/// \param MasterAddr A pointer to the master variable.
/// \param PrivateAddr A pointer to the threadprivate variable.
/// \param IntPtrTy Pointer size type.
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and the copy.in.end block.
///
/// \returns The insertion point where the copying operation is to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for __kmpc_alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for __kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
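///
/// createOMPAlloc and createOMPFree are intended to be used as a pair. A
/// sketch, with `Loc` and `Allocator` as hypothetical values (the allocator
/// would typically come from an allocate/allocator clause):
///
/// \code
/// Value *Size = Builder.getInt64(64);
/// CallInst *Ptr = createOMPAlloc(Loc, Size, Allocator, "buf");
/// // ... use the allocated memory ...
/// createOMPFree(Loc, Ptr, Allocator);
/// \endcode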
/// Create a runtime call for __kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// Create a runtime call for __tgt_interop_init
///
/// \param Loc The insert and source location description.
/// \param InteropVar Variable to be allocated.
/// \param InteropType Type of interop operation.
/// \param Device Device to which offloading will occur.
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_init call
CallInst *createOMPInteropInit(const LocationDescription &Loc,
Value *InteropVar,
omp::OMPInteropType InteropType, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_destroy
///
/// \param Loc The insert and source location description.
/// \param InteropVar Variable to be destroyed.
/// \param Device Device to which offloading will occur.
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_destroy call
CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_use
///
/// \param Loc The insert and source location description.
/// \param InteropVar Variable to be used.
/// \param Device Device to which offloading will occur.
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_use call
CallInst *createOMPInteropUse(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences, Value *DependenceAddress,
bool HaveNowaitClause);
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for __kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for __kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
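///
/// Init and deinit are expected to bracket the emitted kernel body. A sketch
/// with a hypothetical `Loc`:
///
/// \code
/// Builder.restoreIP(createTargetInit(Loc, /*IsSPMD=*/true,
///                                    /*RequiresFullRuntime=*/false));
/// // ... emit the target region body ...
/// createTargetDeinit(Loc, /*IsSPMD=*/true, /*RequiresFullRuntime=*/false);
/// \endcode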
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body.
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional Indicate whether the entry call result will be
/// used to decide whether a thread will execute the
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional Indicate whether the entry call result will be used
/// to decide whether a thread will execute the body code
/// or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable If HasFinalize is set to true, indicate whether
/// the directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Assemble a name from the given parts using platform-specific separators.
/// \param Parts Different parts of the final name that need separation.
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator Separator used between all of the remaining consecutive
/// parts of the name.
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified \p Name. The created variable
/// has CommonLinkage by default and is initialized to a null value.
/// \param Ty Type of the global variable. If the variable already exists,
/// the type must match.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns the corresponding lock object for the specified critical region
/// name. If the lock object does not exist, it is created; otherwise a
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for atomic expression updates, for example:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld The value at the atomic memory address to use for the update.
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
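///
/// For illustration, an update callback implementing `x = x + 1` could be
/// written as the following sketch:
///
/// \code
/// AtomicUpdateCallbackTy UpdateOp = [](Value *XOld, IRBuilder<> &IRB) {
///   // Compute the new value from the old one; the builder is positioned
///   // where the update expression should be emitted.
///   return IRB.CreateAdd(XOld, IRB.getInt32(1), "x.new");
/// };
/// \endcode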
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns Whether a flush was emitted or not.
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
/// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X).
/// Only scalar data types are supported.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param XElemTy The element type of the atomic pointer.
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicrmw, or belongs
/// to {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X is volatile.
/// \param IsXBinopExpr true if \a X is the left-hand side of the binary
/// operation in the right-hand side of the update
/// expression, false otherwise
/// (e.g. true for X = X BinOp Expr).
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *>
emitAtomicUpdate(Instruction *AllocIP, Value *X, Type *XElemTy, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
bool IsXBinopExpr);
/// Emit the binary operation described by \p RMWOp, using \p Src1 and
/// \p Src2.
///
/// \return The resulting instruction.
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// A struct to pack relevant information while generating atomic ops.
struct AtomicOpValue {
Value *Var = nullptr;
Type *ElemTy = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic read for: V = X. Only scalar data types are supported.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where the atomically read value is
/// stored.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for: X = Expr. Only scalar data types are supported.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
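///
/// A sketch combining atomic read and write of an int; `XPtr` and `VPtr` are
/// hypothetical pointers to the shared and capture locations:
///
/// \code
/// AtomicOpValue X{XPtr, Builder.getInt32Ty(), /*IsSigned=*/true,
///                 /*IsVolatile=*/false};
/// AtomicOpValue V{VPtr, Builder.getInt32Ty(), /*IsSigned=*/true,
///                 /*IsVolatile=*/false};
/// Builder.restoreIP(createAtomicRead(Loc, X, V, AtomicOrdering::Monotonic));
/// Builder.restoreIP(createAtomicWrite(Loc, X, Builder.getInt32(0),
///                                     AtomicOrdering::Monotonic));
/// \endcode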
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
/// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X).
/// Only scalar data types are supported.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicrmw, or belongs
/// to {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXBinopExpr true if \a X is the left-hand side of the binary
/// operation in the right-hand side of the update
/// expression, false otherwise
/// (e.g. true for X = X BinOp Expr).
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXBinopExpr);
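///
/// A sketch emitting an atomic `x += expr` update, reusing an UpdateOp
/// callback like the one shown above (all names hypothetical):
///
/// \code
/// Builder.restoreIP(createAtomicUpdate(Loc, AllocIP, X, Expr,
///                                      AtomicOrdering::Monotonic,
///                                      AtomicRMWInst::Add, UpdateOp,
///                                      /*IsXBinopExpr=*/true));
/// \endcode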
/// Emit atomic capture for one of the following constructs (only scalar
/// data types are supported):
/// V = X; X = X BinOp Expr,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicrmw, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a cmpExch-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in-place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXBinopExpr true if X is the left-hand side of the binary
/// operation in the right-hand side of the update
/// expression, false otherwise
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if the original value of 'x' must be stored
/// in 'v', not the updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///      Preheader
///           |
///   /-> Header
///   |       |
///   |      Cond---\
///   |       |     |
///   |      Body   |
///   |      | |    |
///   |     <...>   |
///   |      | |    |
///    \--Latch     |
///                 |
///               Exit
///                 |
///               After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// inclusive) and end at AfterIP (at After's first instruction, exclusive).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// inclusive) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between the Body CFG
/// and Latch to guarantee that there is only a single edge to the latch. It
/// would make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to saying that the Latch has no PHINode and the Header's only
/// PHINode is for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) is CanonicalLoopInfo's responsibility and its build-up is
/// checked by assertOK(). It is expected not to be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may no longer exist.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo, usually using
/// createLoopSkeleton, and invalidate the input CanonicalLoopInfo, or reuse
/// and modify one of the input CanonicalLoopInfos and return it as
/// representing the modified loop. Which of the two happens is an
/// implementation detail of the transformation-implementing method, and
/// callers should always assume that the CanonicalLoopInfo passed to a
/// transformation is invalidated and a new object is returned. Returned
/// CanonicalLoopInfos have the same structure and guarantees as the ones
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special-case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader or After block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// it returns false, the object may have been consumed by a loop
/// transformation or never been initialized. Do not use it in this case.
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const;
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure that, analogous to
/// the preheader, it has just a single entry edge and is free of PHI nodes
/// should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit->getSingleSuccessor();
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Preheader = getPreheader();
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Body = getBody();
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *After = getAfter();
return {After, After->begin()};
};
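/// For illustration, user code can be emitted into the loop body of a valid
/// CanonicalLoopInfo `CLI` via a sketch like the following, where `Builder`
/// is a hypothetical IRBuilder:
///
/// \code
/// Builder.restoreIP(CLI->getBodyIP());
/// Value *IV = CLI->getIndVar();
/// // ... emit the body using IV as the logical iteration number ...
/// \endcode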
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
26_omp_heap.c | // clang-format off
// RUN: %run %s --omp 2>&1 | %filecheck %s --check-prefix=CHECK-TSAN
// RUN: %run %s --omp 2>&1 | %filecheck %s
// REQUIRES: openmp && softcounter
// clang-format on
#include <stdlib.h>
void repeat_alloc_free(unsigned n) {
// Use an unsigned counter to avoid a signed/unsigned comparison with n.
for (unsigned i = 0; i < n; i++) {
double* d = (double*)malloc(sizeof(double) * n);
free(d);
}
}
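// Each of the three parallel sections below performs n = 1000 heap
// allocations (and frees) of double arrays, so the TypeART counters are
// expected to report 3000 heap allocations and 3000 frees of type double in
// total (see the CHECK lines at the end).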
int main(int argc, char** argv) {
const int n = 1000;
// CHECK: [Trace] TypeART Runtime Trace
#pragma omp parallel sections
{
#pragma omp section
repeat_alloc_free(n);
#pragma omp section
repeat_alloc_free(n);
#pragma omp section
repeat_alloc_free(n);
}
// CHECK-TSAN-NOT: ThreadSanitizer
// CHECK-NOT: Error
// CHECK: Allocation type detail (heap, stack, global)
// CHECK: 6 : 3000 , 0 , 0 , double
// CHECK: Free allocation type detail (heap, stack)
// CHECK: 6 : 3000 , 0 , double
return 0;
} |
__clang_hip_cmath.h | /*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __CLANG_HIP_CMATH_H__
#define __CLANG_HIP_CMATH_H__
#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif
#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <limits>
#include <type_traits>
#include <utility>
#endif
#include <limits.h>
#include <stdint.h>
#endif // !defined(__HIPCC_RTC__)
#pragma push_macro("__DEVICE__")
#pragma push_macro("__CONSTEXPR__")
#ifdef __OPENMP_AMDGCN__
#define __DEVICE__ static __attribute__((always_inline, nothrow))
#define __CONSTEXPR__ constexpr
#else
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#define __CONSTEXPR__
#endif // __OPENMP_AMDGCN__
// Start with functions that cannot be defined by DEF macros below.
#if defined(__cplusplus)
#if defined __OPENMP_AMDGCN__
__DEVICE__ __CONSTEXPR__ float fabs(float __x) { return ::fabsf(__x); }
__DEVICE__ __CONSTEXPR__ float sin(float __x) { return ::sinf(__x); }
__DEVICE__ __CONSTEXPR__ float cos(float __x) { return ::cosf(__x); }
#endif
__DEVICE__ __CONSTEXPR__ double abs(double __x) { return ::fabs(__x); }
__DEVICE__ __CONSTEXPR__ float abs(float __x) { return ::fabsf(__x); }
__DEVICE__ __CONSTEXPR__ long long abs(long long __n) { return ::llabs(__n); }
__DEVICE__ __CONSTEXPR__ long abs(long __n) { return ::labs(__n); }
__DEVICE__ __CONSTEXPR__ float fma(float __x, float __y, float __z) {
return ::fmaf(__x, __y, __z);
}
#if !defined(__HIPCC_RTC__)
// The value returned by fpclassify is platform-dependent; therefore it is
// not supported by hipRTC.
__DEVICE__ __CONSTEXPR__ int fpclassify(float __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
__DEVICE__ __CONSTEXPR__ int fpclassify(double __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
#endif // !defined(__HIPCC_RTC__)
__DEVICE__ __CONSTEXPR__ float frexp(float __arg, int *__exp) {
return ::frexpf(__arg, __exp);
}
#if defined(__OPENMP_AMDGCN__)
// For OpenMP we work around some old system headers that have non-conforming
// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
// this by providing two versions of these functions, differing only in the
// return type. To avoid conflicting definitions we disable implicit base
// function generation. That means we will end up with two specializations, one
// per type, but only one has a base function defined by the system header.
#pragma omp begin declare variant match( \
implementation = {extension(disable_implicit_base)})
// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
// add a suffix. This means we would clash with the names of the variants
// (note that we do not create implicit base functions here). To avoid
// this clash we add a new trait to some of them that is always true
// (this is LLVM after all ;)). It will only influence the mangled name
// of the variants inside the inner region and avoid the clash.
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
__DEVICE__ __CONSTEXPR__ int isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ __CONSTEXPR__ int isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ __CONSTEXPR__ int isfinite(float __x) { return ::__finitef(__x); }
__DEVICE__ __CONSTEXPR__ int isfinite(double __x) { return ::__finite(__x); }
__DEVICE__ __CONSTEXPR__ int isnan(float __x) { return ::__isnanf(__x); }
__DEVICE__ __CONSTEXPR__ int isnan(double __x) { return ::__isnan(__x); }
#pragma omp end declare variant
#endif // defined(__OPENMP_AMDGCN__)
__DEVICE__ __CONSTEXPR__ bool isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ __CONSTEXPR__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ __CONSTEXPR__ bool isfinite(float __x) { return ::__finitef(__x); }
__DEVICE__ __CONSTEXPR__ bool isfinite(double __x) { return ::__finite(__x); }
__DEVICE__ __CONSTEXPR__ bool isnan(float __x) { return ::__isnanf(__x); }
__DEVICE__ __CONSTEXPR__ bool isnan(double __x) { return ::__isnan(__x); }
#if defined(__OPENMP_AMDGCN__)
#pragma omp end declare variant
#endif // defined(__OPENMP_AMDGCN__)
__DEVICE__ __CONSTEXPR__ bool isgreater(float __x, float __y) {
return __builtin_isgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isgreater(double __x, double __y) {
return __builtin_isgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isgreaterequal(float __x, float __y) {
return __builtin_isgreaterequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isgreaterequal(double __x, double __y) {
return __builtin_isgreaterequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isless(float __x, float __y) {
return __builtin_isless(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isless(double __x, double __y) {
return __builtin_isless(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessequal(float __x, float __y) {
return __builtin_islessequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessequal(double __x, double __y) {
return __builtin_islessequal(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessgreater(float __x, float __y) {
return __builtin_islessgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool islessgreater(double __x, double __y) {
return __builtin_islessgreater(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isnormal(float __x) {
return __builtin_isnormal(__x);
}
__DEVICE__ __CONSTEXPR__ bool isnormal(double __x) {
return __builtin_isnormal(__x);
}
__DEVICE__ __CONSTEXPR__ bool isunordered(float __x, float __y) {
return __builtin_isunordered(__x, __y);
}
__DEVICE__ __CONSTEXPR__ bool isunordered(double __x, double __y) {
return __builtin_isunordered(__x, __y);
}
__DEVICE__ __CONSTEXPR__ float modf(float __x, float *__iptr) {
return ::modff(__x, __iptr);
}
__DEVICE__ __CONSTEXPR__ float pow(float __base, int __iexp) {
return ::powif(__base, __iexp);
}
__DEVICE__ __CONSTEXPR__ double pow(double __base, int __iexp) {
return ::powi(__base, __iexp);
}
__DEVICE__ __CONSTEXPR__ float remquo(float __x, float __y, int *__quo) {
return ::remquof(__x, __y, __quo);
}
__DEVICE__ __CONSTEXPR__ float scalbln(float __x, long int __n) {
return ::scalblnf(__x, __n);
}
__DEVICE__ __CONSTEXPR__ bool signbit(float __x) { return ::__signbitf(__x); }
__DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }
// Notably missing above is nexttoward. We omit it because
// ocml doesn't provide an implementation, and we don't want to be in the
// business of implementing tricky libm functions in this header.
// Other functions.
__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,
_Float16 __z) {
return __ocml_fma_f16(__x, __y, __z);
}
__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {
return __ocml_pown_f16(__base, __iexp);
}
#ifndef __OPENMP_AMDGCN__
// BEGIN DEF_FUN and HIP_OVERLOAD
// BEGIN DEF_FUN
#pragma push_macro("__DEF_FUN1")
#pragma push_macro("__DEF_FUN2")
#pragma push_macro("__DEF_FUN2_FI")
// Define cmath functions with a float argument returning __retty.
#define __DEF_FUN1(__retty, __func) \
__DEVICE__ __CONSTEXPR__ __retty __func(float __x) { return __func##f(__x); }
// Define cmath functions with two float arguments returning __retty.
#define __DEF_FUN2(__retty, __func) \
__DEVICE__ __CONSTEXPR__ __retty __func(float __x, float __y) { \
return __func##f(__x, __y); \
}
// Define cmath functions with a float and an int argument returning __retty.
#define __DEF_FUN2_FI(__retty, __func) \
__DEVICE__ __CONSTEXPR__ __retty __func(float __x, int __y) { \
return __func##f(__x, __y); \
}
__DEF_FUN1(float, acos)
__DEF_FUN1(float, acosh)
__DEF_FUN1(float, asin)
__DEF_FUN1(float, asinh)
__DEF_FUN1(float, atan)
__DEF_FUN2(float, atan2)
__DEF_FUN1(float, atanh)
__DEF_FUN1(float, cbrt)
__DEF_FUN1(float, ceil)
__DEF_FUN2(float, copysign)
__DEF_FUN1(float, cos)
__DEF_FUN1(float, cosh)
__DEF_FUN1(float, erf)
__DEF_FUN1(float, erfc)
__DEF_FUN1(float, exp)
__DEF_FUN1(float, exp2)
__DEF_FUN1(float, expm1)
__DEF_FUN1(float, fabs)
__DEF_FUN2(float, fdim)
__DEF_FUN1(float, floor)
__DEF_FUN2(float, fmax)
__DEF_FUN2(float, fmin)
__DEF_FUN2(float, fmod)
__DEF_FUN2(float, hypot)
__DEF_FUN1(int, ilogb)
__DEF_FUN2_FI(float, ldexp)
__DEF_FUN1(float, lgamma)
__DEF_FUN1(float, log)
__DEF_FUN1(float, log10)
__DEF_FUN1(float, log1p)
__DEF_FUN1(float, log2)
__DEF_FUN1(float, logb)
__DEF_FUN1(long long, llrint)
__DEF_FUN1(long long, llround)
__DEF_FUN1(long, lrint)
__DEF_FUN1(long, lround)
__DEF_FUN1(float, nearbyint)
__DEF_FUN2(float, nextafter)
__DEF_FUN2(float, pow)
__DEF_FUN2(float, remainder)
__DEF_FUN1(float, rint)
__DEF_FUN1(float, round)
__DEF_FUN2_FI(float, scalbn)
__DEF_FUN1(float, sin)
__DEF_FUN1(float, sinh)
__DEF_FUN1(float, sqrt)
__DEF_FUN1(float, tan)
__DEF_FUN1(float, tanh)
__DEF_FUN1(float, tgamma)
__DEF_FUN1(float, trunc)
#pragma pop_macro("__DEF_FUN1")
#pragma pop_macro("__DEF_FUN2")
#pragma pop_macro("__DEF_FUN2_FI")
// END DEF_FUN
// BEGIN HIP_OVERLOAD
#pragma push_macro("__HIP_OVERLOAD1")
#pragma push_macro("__HIP_OVERLOAD2")
// __hip_enable_if::type is a type function which returns __T if __B is true.
template <bool __B, class __T = void> struct __hip_enable_if {};
template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
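// For illustration: __hip_enable_if<true, double>::type is double, while
// __hip_enable_if<false, double> has no `type` member, so an overload whose
// return type names it is removed from the candidate set (SFINAE).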
namespace __hip {
template <class _Tp> struct is_integral {
enum { value = 0 };
};
template <> struct is_integral<bool> {
enum { value = 1 };
};
template <> struct is_integral<char> {
enum { value = 1 };
};
template <> struct is_integral<signed char> {
enum { value = 1 };
};
template <> struct is_integral<unsigned char> {
enum { value = 1 };
};
template <> struct is_integral<wchar_t> {
enum { value = 1 };
};
template <> struct is_integral<short> {
enum { value = 1 };
};
template <> struct is_integral<unsigned short> {
enum { value = 1 };
};
template <> struct is_integral<int> {
enum { value = 1 };
};
template <> struct is_integral<unsigned int> {
enum { value = 1 };
};
template <> struct is_integral<long> {
enum { value = 1 };
};
template <> struct is_integral<unsigned long> {
enum { value = 1 };
};
template <> struct is_integral<long long> {
enum { value = 1 };
};
template <> struct is_integral<unsigned long long> {
enum { value = 1 };
};
// TODO: specialize is_arithmetic<_Float16>
template <class _Tp> struct is_arithmetic {
enum { value = 0 };
};
template <> struct is_arithmetic<bool> {
enum { value = 1 };
};
template <> struct is_arithmetic<char> {
enum { value = 1 };
};
template <> struct is_arithmetic<signed char> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned char> {
enum { value = 1 };
};
template <> struct is_arithmetic<wchar_t> {
enum { value = 1 };
};
template <> struct is_arithmetic<short> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned short> {
enum { value = 1 };
};
template <> struct is_arithmetic<int> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned int> {
enum { value = 1 };
};
template <> struct is_arithmetic<long> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned long> {
enum { value = 1 };
};
template <> struct is_arithmetic<long long> {
enum { value = 1 };
};
template <> struct is_arithmetic<unsigned long long> {
enum { value = 1 };
};
template <> struct is_arithmetic<float> {
enum { value = 1 };
};
template <> struct is_arithmetic<double> {
enum { value = 1 };
};
struct true_type {
static const __constant__ bool value = true;
};
struct false_type {
static const __constant__ bool value = false;
};
template <typename __T, typename __U> struct is_same : public false_type {};
template <typename __T> struct is_same<__T, __T> : public true_type {};
template <typename __T> struct add_rvalue_reference { typedef __T &&type; };
template <typename __T> typename add_rvalue_reference<__T>::type declval();
// decltype is only available in C++11 and above.
#if __cplusplus >= 201103L
// __hip_promote
template <class _Tp> struct __numeric_type {
static void __test(...);
static _Float16 __test(_Float16);
static float __test(float);
static double __test(char);
static double __test(int);
static double __test(unsigned);
static double __test(long);
static double __test(unsigned long);
static double __test(long long);
static double __test(unsigned long long);
static double __test(double);
// No support for long double, use double instead.
static double __test(long double);
typedef decltype(__test(declval<_Tp>())) type;
static const bool value = !is_same<type, void>::value;
};
template <> struct __numeric_type<void> { static const bool value = true; };
template <class _A1, class _A2 = void, class _A3 = void,
bool = __numeric_type<_A1>::value &&__numeric_type<_A2>::value
&&__numeric_type<_A3>::value>
class __promote_imp {
public:
static const bool value = false;
};
template <class _A1, class _A2, class _A3>
class __promote_imp<_A1, _A2, _A3, true> {
private:
typedef typename __promote_imp<_A1>::type __type1;
typedef typename __promote_imp<_A2>::type __type2;
typedef typename __promote_imp<_A3>::type __type3;
public:
typedef decltype(__type1() + __type2() + __type3()) type;
static const bool value = true;
};
template <class _A1, class _A2> class __promote_imp<_A1, _A2, void, true> {
private:
typedef typename __promote_imp<_A1>::type __type1;
typedef typename __promote_imp<_A2>::type __type2;
public:
typedef decltype(__type1() + __type2()) type;
static const bool value = true;
};
template <class _A1> class __promote_imp<_A1, void, void, true> {
public:
typedef typename __numeric_type<_A1>::type type;
static const bool value = true;
};
template <class _A1, class _A2 = void, class _A3 = void>
class __promote : public __promote_imp<_A1, _A2, _A3> {};
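// For illustration: __promote<float, int>::type is double, because
// __numeric_type maps int to double and float + double yields double. This
// is the promotion used by the mixed-type overloads below.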
#endif //__cplusplus >= 201103L
} // namespace __hip
// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to
// avoid compilation errors due to ambiguity, e.g. floor(5) is resolved with
// floor(double).
#define __HIP_OVERLOAD1(__retty, __fn) \
template <typename __T> \
__DEVICE__ __CONSTEXPR__ \
typename __hip_enable_if<__hip::is_integral<__T>::value, __retty>::type \
__fn(__T __x) { \
return ::__fn((double)__x); \
}
// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
// or integer arguments to avoid compilation errors due to ambiguity, e.g.
// max(5.0f, 6.0) is resolved with max(double, double).
#if __cplusplus >= 201103L
#define __HIP_OVERLOAD2(__retty, __fn) \
template <typename __T1, typename __T2> \
__DEVICE__ __CONSTEXPR__ typename __hip_enable_if< \
__hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value, \
typename __hip::__promote<__T1, __T2>::type>::type \
__fn(__T1 __x, __T2 __y) { \
typedef typename __hip::__promote<__T1, __T2>::type __result_type; \
return __fn((__result_type)__x, (__result_type)__y); \
}
#else
#define __HIP_OVERLOAD2(__retty, __fn) \
template <typename __T1, typename __T2> \
__DEVICE__ __CONSTEXPR__ \
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value && \
__hip::is_arithmetic<__T2>::value, \
__retty>::type \
__fn(__T1 __x, __T2 __y) { \
return __fn((double)__x, (double)__y); \
}
#endif
__HIP_OVERLOAD1(double, acos)
__HIP_OVERLOAD1(double, acosh)
__HIP_OVERLOAD1(double, asin)
__HIP_OVERLOAD1(double, asinh)
__HIP_OVERLOAD1(double, atan)
__HIP_OVERLOAD2(double, atan2)
__HIP_OVERLOAD1(double, atanh)
__HIP_OVERLOAD1(double, cbrt)
__HIP_OVERLOAD1(double, ceil)
__HIP_OVERLOAD2(double, copysign)
__HIP_OVERLOAD1(double, cos)
__HIP_OVERLOAD1(double, cosh)
__HIP_OVERLOAD1(double, erf)
__HIP_OVERLOAD1(double, erfc)
__HIP_OVERLOAD1(double, exp)
__HIP_OVERLOAD1(double, exp2)
__HIP_OVERLOAD1(double, expm1)
__HIP_OVERLOAD1(double, fabs)
__HIP_OVERLOAD2(double, fdim)
__HIP_OVERLOAD1(double, floor)
__HIP_OVERLOAD2(double, fmax)
__HIP_OVERLOAD2(double, fmin)
__HIP_OVERLOAD2(double, fmod)
#if !defined(__HIPCC_RTC__)
__HIP_OVERLOAD1(int, fpclassify)
#endif // !defined(__HIPCC_RTC__)
__HIP_OVERLOAD2(double, hypot)
__HIP_OVERLOAD1(int, ilogb)
__HIP_OVERLOAD1(bool, isfinite)
__HIP_OVERLOAD2(bool, isgreater)
__HIP_OVERLOAD2(bool, isgreaterequal)
__HIP_OVERLOAD1(bool, isinf)
__HIP_OVERLOAD2(bool, isless)
__HIP_OVERLOAD2(bool, islessequal)
__HIP_OVERLOAD2(bool, islessgreater)
__HIP_OVERLOAD1(bool, isnan)
__HIP_OVERLOAD1(bool, isnormal)
__HIP_OVERLOAD2(bool, isunordered)
__HIP_OVERLOAD1(double, lgamma)
__HIP_OVERLOAD1(double, log)
__HIP_OVERLOAD1(double, log10)
__HIP_OVERLOAD1(double, log1p)
__HIP_OVERLOAD1(double, log2)
__HIP_OVERLOAD1(double, logb)
__HIP_OVERLOAD1(long long, llrint)
__HIP_OVERLOAD1(long long, llround)
__HIP_OVERLOAD1(long, lrint)
__HIP_OVERLOAD1(long, lround)
__HIP_OVERLOAD1(double, nearbyint)
__HIP_OVERLOAD2(double, nextafter)
__HIP_OVERLOAD2(double, pow)
__HIP_OVERLOAD2(double, remainder)
__HIP_OVERLOAD1(double, rint)
__HIP_OVERLOAD1(double, round)
__HIP_OVERLOAD1(bool, signbit)
__HIP_OVERLOAD1(double, sin)
__HIP_OVERLOAD1(double, sinh)
__HIP_OVERLOAD1(double, sqrt)
__HIP_OVERLOAD1(double, tan)
__HIP_OVERLOAD1(double, tanh)
__HIP_OVERLOAD1(double, tgamma)
__HIP_OVERLOAD1(double, trunc)
// Overload these but don't add them to std, they are not part of cmath.
__HIP_OVERLOAD2(double, max)
__HIP_OVERLOAD2(double, min)
// Additional Overloads that don't quite match HIP_OVERLOAD.
#if __cplusplus >= 201103L
template <typename __T1, typename __T2, typename __T3>
__DEVICE__ __CONSTEXPR__ typename __hip_enable_if<
__hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value &&
__hip::is_arithmetic<__T3>::value,
typename __hip::__promote<__T1, __T2, __T3>::type>::type
fma(__T1 __x, __T2 __y, __T3 __z) {
typedef typename __hip::__promote<__T1, __T2, __T3>::type __result_type;
return ::fma((__result_type)__x, (__result_type)__y, (__result_type)__z);
}
#else
template <typename __T1, typename __T2, typename __T3>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
__hip::is_arithmetic<__T2>::value &&
__hip::is_arithmetic<__T3>::value,
double>::type
fma(__T1 __x, __T2 __y, __T3 __z) {
return ::fma((double)__x, (double)__y, (double)__z);
}
#endif
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
frexp(__T __x, int *__exp) {
return ::frexp((double)__x, __exp);
}
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
ldexp(__T __x, int __exp) {
return ::ldexp((double)__x, __exp);
}
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
modf(__T __x, double *__exp) {
return ::modf((double)__x, __exp);
}
#if __cplusplus >= 201103L
template <typename __T1, typename __T2>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
__hip::is_arithmetic<__T2>::value,
typename __hip::__promote<__T1, __T2>::type>::type
remquo(__T1 __x, __T2 __y, int *__quo) {
typedef typename __hip::__promote<__T1, __T2>::type __result_type;
return ::remquo((__result_type)__x, (__result_type)__y, __quo);
}
#else
template <typename __T1, typename __T2>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
__hip::is_arithmetic<__T2>::value,
double>::type
remquo(__T1 __x, __T2 __y, int *__quo) {
return ::remquo((double)__x, (double)__y, __quo);
}
#endif
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
scalbln(__T __x, long int __exp) {
return ::scalbln((double)__x, __exp);
}
template <typename __T>
__DEVICE__ __CONSTEXPR__
typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
scalbn(__T __x, int __exp) {
return ::scalbn((double)__x, __exp);
}
#pragma pop_macro("__HIP_OVERLOAD1")
#pragma pop_macro("__HIP_OVERLOAD2")
// END HIP_OVERLOAD
// END DEF_FUN and HIP_OVERLOAD
#endif // ifndef __OPENMP_AMDGCN__
#endif // defined(__cplusplus)
#ifndef __OPENMP_AMDGCN__
// Define these overloads inside the namespace our standard library uses.
#if !defined(__HIPCC_RTC__)
#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_BEGIN_NAMESPACE_STD
#else
namespace std {
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif // _LIBCPP_BEGIN_NAMESPACE_STD
// Pull the new overloads we defined above into namespace std.
// using ::abs; - This may be considered for C++.
using ::acos;
using ::acosh;
using ::asin;
using ::asinh;
using ::atan;
using ::atan2;
using ::atanh;
using ::cbrt;
using ::ceil;
using ::copysign;
using ::cos;
using ::cosh;
using ::erf;
using ::erfc;
using ::exp;
using ::exp2;
using ::expm1;
using ::fabs;
using ::fdim;
using ::floor;
using ::fma;
using ::fmax;
using ::fmin;
using ::fmod;
using ::fpclassify;
using ::frexp;
using ::hypot;
using ::ilogb;
using ::isfinite;
using ::isgreater;
using ::isgreaterequal;
using ::isless;
using ::islessequal;
using ::islessgreater;
using ::isnormal;
using ::isunordered;
using ::ldexp;
using ::lgamma;
using ::llrint;
using ::llround;
using ::log;
using ::log10;
using ::log1p;
using ::log2;
using ::logb;
using ::lrint;
using ::lround;
using ::modf;
// using ::nan; - This may be considered for C++.
// using ::nanf; - This may be considered for C++.
// using ::nanl; - This is not yet defined.
using ::nearbyint;
using ::nextafter;
// using ::nexttoward; - Omit this since we do not have a definition.
using ::pow;
using ::remainder;
using ::remquo;
using ::rint;
using ::round;
using ::scalbln;
using ::scalbn;
using ::signbit;
using ::sin;
using ::sinh;
using ::sqrt;
using ::tan;
using ::tanh;
using ::tgamma;
using ::trunc;
// Well this is fun: We need to pull these symbols in for libc++, but we can't
// pull them in with libstdc++, because its ::isinf and ::isnan are different
// from its std::isinf and std::isnan.
#ifndef __GLIBCXX__
using ::isinf;
using ::isnan;
#endif
// Finally, pull the "foobarf" functions that HIP defines into std.
using ::acosf;
using ::acoshf;
using ::asinf;
using ::asinhf;
using ::atan2f;
using ::atanf;
using ::atanhf;
using ::cbrtf;
using ::ceilf;
using ::copysignf;
using ::cosf;
using ::coshf;
using ::erfcf;
using ::erff;
using ::exp2f;
using ::expf;
using ::expm1f;
using ::fabsf;
using ::fdimf;
using ::floorf;
using ::fmaf;
using ::fmaxf;
using ::fminf;
using ::fmodf;
using ::frexpf;
using ::hypotf;
using ::ilogbf;
using ::ldexpf;
using ::lgammaf;
using ::llrintf;
using ::llroundf;
using ::log10f;
using ::log1pf;
using ::log2f;
using ::logbf;
using ::logf;
using ::lrintf;
using ::lroundf;
using ::modff;
using ::nearbyintf;
using ::nextafterf;
// using ::nexttowardf; - Omit this since we do not have a definition.
using ::powf;
using ::remainderf;
using ::remquof;
using ::rintf;
using ::roundf;
using ::scalblnf;
using ::scalbnf;
using ::sinf;
using ::sinhf;
using ::sqrtf;
using ::tanf;
using ::tanhf;
using ::tgammaf;
using ::truncf;
#ifdef _LIBCPP_END_NAMESPACE_STD
_LIBCPP_END_NAMESPACE_STD
#else
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_END_NAMESPACE_VERSION
#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
} // namespace std
#endif // _LIBCPP_END_NAMESPACE_STD
#endif // !defined(__HIPCC_RTC__)
// Define device-side math functions from <ymath.h> on MSVC.
#if !defined(__HIPCC_RTC__)
#if defined(_MSC_VER)
// Before VS2019, `<ymath.h>` is also included in `<limits>` and other headers.
// But, from VS2019, it's only included in `<complex>`. Need to include
// `<ymath.h>` here to ensure C functions declared there won't be marked as
// `__host__` and `__device__` through `<complex>` wrapper.
#include <ymath.h>
#if defined(__cplusplus)
extern "C" {
#endif // defined(__cplusplus)
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Cosh(double x,
double y) {
return cosh(x) * y;
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FCosh(float x,
float y) {
return coshf(x) * y;
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _Dtest(double *p) {
return fpclassify(*p);
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) short _FDtest(float *p) {
return fpclassify(*p);
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) double _Sinh(double x,
double y) {
return sinh(x) * y;
}
__DEVICE__ __CONSTEXPR__ __attribute__((overloadable)) float _FSinh(float x,
float y) {
return sinhf(x) * y;
}
#if defined(__cplusplus)
}
#endif // defined(__cplusplus)
#endif // defined(_MSC_VER)
#endif // !defined(__HIPCC_RTC__)
#endif // ifndef __OPENMP_AMDGCN__
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__CONSTEXPR__")
#endif // __CLANG_HIP_CMATH_H__
|
graph_generator.c | /* Copyright (C) 2009-2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "user_settings.h"
#include "splittable_mrg.h"
#include "graph_generator.h"
/* Initiator settings: for faster random number generation, the initiator
* probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR /
* INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR /
* INITIATOR_DENOMINATOR, d = 1 - a - b - c). */
#define INITIATOR_A_NUMERATOR 5700
#define INITIATOR_BC_NUMERATOR 1900
#define INITIATOR_DENOMINATOR 10000
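/* Worked example for the defaults above (these match the Graph500 reference
* parameters): a = 5700/10000 = 0.57, b = c = 1900/10000 = 0.19, and
* d = 1 - 0.57 - 0.19 - 0.19 = 0.05. */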
/* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL /
* INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise
* into the graph parameters. The approach used is from "A Hitchhiker's Guide
* to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali
* Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that
* the adjustment here is chosen based on the current level being processed
* rather than being chosen randomly. */
#define SPK_NOISE_LEVEL 1000
/* SPK_NOISE_LEVEL is expressed in INITIATOR_DENOMINATOR units; set it to 0
* to disable noise entirely. */
static int generate_4way_bernoulli(mrg_state* st, int level, int nlevels) {
/* Generate a pseudorandom number in the range [0, INITIATOR_DENOMINATOR)
* without modulo bias. */
static const uint32_t limit = (UINT32_C(0xFFFFFFFF) % INITIATOR_DENOMINATOR);
uint32_t val = mrg_get_uint_orig(st);
if (/* Unlikely */ val < limit) {
do {
val = mrg_get_uint_orig(st);
} while (val < limit);
}
#if SPK_NOISE_LEVEL == 0
int spk_noise_factor = 0;
#else
int spk_noise_factor = 2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL;
#endif
int adjusted_bc_numerator = INITIATOR_BC_NUMERATOR + spk_noise_factor;
val %= INITIATOR_DENOMINATOR;
if (val < adjusted_bc_numerator) return 1;
val -= adjusted_bc_numerator;
if (val < adjusted_bc_numerator) return 2;
val -= adjusted_bc_numerator;
#if SPK_NOISE_LEVEL == 0
if (val < INITIATOR_A_NUMERATOR) return 0;
#else
if (val < INITIATOR_A_NUMERATOR * (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) / (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator)) return 0;
#endif
return 3;
}
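/* Illustrative sketch (not part of the generator): the classic rejection
* sampling pattern used above, in standalone form. `rand_u32` is a
* hypothetical stand-in for mrg_get_uint_orig. */
#if 0
static uint32_t uniform_below(uint32_t (*rand_u32)(void), uint32_t n) {
/* After a plain modulo, the lowest (2^32 mod n) results are slightly
* over-represented; redraw while the sample lands in the biased region. */
uint32_t limit = (uint32_t)(0u - n) % n; /* == 2^32 mod n */
uint32_t v;
do {
v = rand_u32();
} while (v < limit);
return v % n;
}
#endif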
/* Reverse bits in a number; this should be optimized for performance
* (including using bit- or byte-reverse intrinsics if your platform has
* them). */
static inline uint64_t bitreverse(uint64_t x) {
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
#define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */
#endif
#ifdef FAST_64BIT_ARITHMETIC
/* 64-bit code */
#ifdef USE_GCC_BYTESWAP
x = __builtin_bswap64(x);
#else
x = (x >> 32) | (x << 32);
x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16);
x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8);
#endif
x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4);
x = ((x >> 2) & UINT64_C(0x3333333333333333)) | ((x & UINT64_C(0x3333333333333333)) << 2);
x = ((x >> 1) & UINT64_C(0x5555555555555555)) | ((x & UINT64_C(0x5555555555555555)) << 1);
return x;
#else
/* 32-bit code */
uint32_t h = (uint32_t)(x >> 32);
uint32_t l = (uint32_t)(x & UINT32_MAX);
#ifdef USE_GCC_BYTESWAP
h = __builtin_bswap32(h);
l = __builtin_bswap32(l);
#else
h = (h >> 16) | (h << 16);
l = (l >> 16) | (l << 16);
h = ((h >> 8) & UINT32_C(0x00FF00FF)) | ((h & UINT32_C(0x00FF00FF)) << 8);
l = ((l >> 8) & UINT32_C(0x00FF00FF)) | ((l & UINT32_C(0x00FF00FF)) << 8);
#endif
h = ((h >> 4) & UINT32_C(0x0F0F0F0F)) | ((h & UINT32_C(0x0F0F0F0F)) << 4);
l = ((l >> 4) & UINT32_C(0x0F0F0F0F)) | ((l & UINT32_C(0x0F0F0F0F)) << 4);
h = ((h >> 2) & UINT32_C(0x33333333)) | ((h & UINT32_C(0x33333333)) << 2);
l = ((l >> 2) & UINT32_C(0x33333333)) | ((l & UINT32_C(0x33333333)) << 2);
h = ((h >> 1) & UINT32_C(0x55555555)) | ((h & UINT32_C(0x55555555)) << 1);
l = ((l >> 1) & UINT32_C(0x55555555)) | ((l & UINT32_C(0x55555555)) << 1);
return ((uint64_t)l << 32) | h; /* Swap halves */
#endif
}
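/* Quick sanity sketch (hypothetical self-test, not part of the generator):
* properties any correct 64-bit bit reversal must satisfy, usable to verify
* both the 64-bit and 32-bit paths above. */
#if 0
static void bitreverse_selftest(void) {
assert(bitreverse(UINT64_C(1)) == UINT64_C(0x8000000000000000));
assert(bitreverse(UINT64_C(0x8000000000000000)) == UINT64_C(1));
assert(bitreverse(bitreverse(UINT64_C(0x0123456789ABCDEF)))
== UINT64_C(0x0123456789ABCDEF));
}
#endif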
/* Apply a permutation to scramble vertex numbers; a randomly generated
* permutation is not used because applying it at scale is too expensive. */
static inline int64_t scramble(int64_t v0, int lgN, uint64_t val0, uint64_t val1) {
uint64_t v = (uint64_t)v0;
v += val0 + val1;
v *= (val0 | UINT64_C(0x4519840211493211));
v = (bitreverse(v) >> (64 - lgN));
assert ((v >> lgN) == 0);
v *= (val1 | UINT64_C(0x3050852102C843A5));
v = (bitreverse(v) >> (64 - lgN));
assert ((v >> lgN) == 0);
return (int64_t)v;
}
/* Make a single graph edge using a pre-set MRG state. */
static
void make_one_edge(int64_t nverts, int level, int lgN, mrg_state* st, packed_edge* result, uint64_t val0, uint64_t val1) {
int64_t base_src = 0, base_tgt = 0;
while (nverts > 1) {
int square = generate_4way_bernoulli(st, level, lgN);
int src_offset = square / 2;
int tgt_offset = square % 2;
assert (base_src <= base_tgt);
if (base_src == base_tgt) {
/* Clip-and-flip for undirected graph */
if (src_offset > tgt_offset) {
int temp = src_offset;
src_offset = tgt_offset;
tgt_offset = temp;
}
}
nverts /= 2;
++level;
base_src += nverts * src_offset;
base_tgt += nverts * tgt_offset;
}
write_edge(result,
scramble(base_src, lgN, val0, val1),
scramble(base_tgt, lgN, val0, val1));
}
/* Generate a range of edges (from start_edge to end_edge of the total graph),
* writing into elements [0, end_edge - start_edge) of the edges array. This
* code is parallel on OpenMP and XMT; it must be used with
* separately-implemented SPMD parallelism for MPI. */
void generate_kronecker_range(
const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1), not all zero */,
int logN /* In base 2 */,
int64_t start_edge, int64_t end_edge,
packed_edge* edges) {
mrg_state state;
int64_t nverts = (int64_t)1 << logN;
int64_t ei;
mrg_seed(&state, seed);
uint64_t val0, val1; /* Values for scrambling */
{
mrg_state new_state = state;
mrg_skip(&new_state, 50, 7, 0);
val0 = mrg_get_uint_orig(&new_state);
val0 *= UINT64_C(0xFFFFFFFF);
val0 += mrg_get_uint_orig(&new_state);
val1 = mrg_get_uint_orig(&new_state);
val1 *= UINT64_C(0xFFFFFFFF);
val1 += mrg_get_uint_orig(&new_state);
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
for (ei = start_edge; ei < end_edge; ++ei) {
mrg_state new_state = state;
mrg_skip(&new_state, 0, ei, 0);
make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0, val1);
}
}
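/* Hypothetical usage sketch (seed values and sizes are made up, not part of
* this file): generate every edge of a scale-16 graph with edgefactor 16 in
* a single call. */
#if 0
#include <stdlib.h>
int main(void) {
uint_fast32_t seed[5] = {1, 2, 3, 4, 5}; /* in [0, 2^31 - 1), not all zero */
int logN = 16; /* 2^16 vertices */
int64_t nedges = INT64_C(16) << logN; /* edgefactor 16 */
packed_edge* edges = malloc(nedges * sizeof(packed_edge));
if (!edges) return 1;
generate_kronecker_range(seed, logN, 0, nedges, edges);
free(edges);
return 0;
}
#endif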
|
GB_unaryop__abs_uint64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_fp64
// op(A') function: GB_tran__abs_uint64_fp64
// C type: uint64_t
// A type: double
// cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
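// Illustrative expansion (not in the generated file): for this kernel,
// GB_CAST_OP (p, p) is equivalent to the block below. GB_CAST_UNSIGNED
// guards the double-to-unsigned conversion (NaN and out-of-range values),
// and the "abs" op is the identity because the result is already unsigned.
#if 0
{
double aij = Ax [p] ; // GB_GETA
uint64_t x ; GB_CAST_UNSIGNED (x, aij, 64) ; // GB_CASTING
Cx [p] = x ; // GB_OP: z = x
}
#endif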
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint64_fp64
(
uint64_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rawmd5u_fmt_plug.c | /*
* Thick raw-md5-unicode (come-back :)
*
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawmd5uthick;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawmd5uthick);
#else
#include <string.h>
#include "arch.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#include "simd-intrinsics.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "memory.h"
#include "johnswap.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-MD5u"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "md5(utf16($p)) " MD5_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define BLOCK_LOOPS 1
#define PLAINTEXT_LENGTH 27
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS * BLOCK_LOOPS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
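/* Illustrative note (a sketch assuming SIMD_COEF_32 == 4, not part of the
* format): GETPOS interleaves keys so byte i of key `index` lands in that
* key's SIMD lane of a shared MD5 block, spelled out term by term below. */
#if 0
static unsigned int getpos_example(unsigned int i, unsigned int index)
{
unsigned int lane = index & 3; /* index % SIMD_COEF_32 */
unsigned int block = index / 4; /* which group of 4 lanes */
return lane * 4 /* lane offset within a word */
+ (i & ~3U) * 4 /* 4-byte word, times 4 lanes */
+ (i & 3) /* byte within the word */
+ block * 16 * 4 * 4; /* 16 words * 4 lanes * 4 bytes */
}
#endif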
#ifdef SIMD_COEF_32
static unsigned char (*saved_key);
static unsigned char (*crypt_key);
static unsigned int (**buf_ptr);
#else
static MD5_CTX ctx;
static int saved_len;
static UTF16 saved_key[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 crypt_key[BINARY_SIZE / 4];
#endif
/* Note some plaintexts will be replaced in init() if running UTF-8 */
static struct fmt_tests tests[] = {
{"16c47151c18ac087cd12b3a70746c790", "test1"},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"9c3abef89ff76f8acd80eae37b35f64f", "test2"},
{"849ee1b88b5d887bdb058180a666b450", "test3"},
{"8c4cb7e8b33b56a833cdaa8673f3b425", "test4"},
{"537e738b1ac5551f65106368dc301ece", "thatsworking"},
// repeat first hash in exactly the same form that is used in john.pot
{"$dynamic_29$16c47151c18ac087cd12b3a70746c790", "test1"},
{NULL}
};
static void set_key_utf8(char *_key, int index);
static void set_key_CP(char *_key, int index);
static void init(struct fmt_main *self)
{
#if SIMD_COEF_32
int i;
#endif
if (options.target_enc == UTF_8) {
/* This avoids an if clause for every set_key */
self->methods.set_key = set_key_utf8;
#if SIMD_COEF_32
/* kick it up from 27. We will truncate in set_key_utf8() */
self->params.plaintext_length = 3 * PLAINTEXT_LENGTH;
#endif
tests[1].ciphertext = "94a4e171de16580742c4d141e6607bf7";
tests[1].plaintext = "\xE2\x82\xAC"; // Euro sign
tests[2].ciphertext = "03c60810f0e54d16e826aca385d776c8";
tests[2].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; // 2 x euro
tests[3].ciphertext = "2d554433d7cde7ec8d16aaf126c3be6b";
tests[3].plaintext = "\xE2\x82\xAC\xC3\xBC"; // euro and u-umlaut
tests[4].ciphertext = "8007d9070b27db7b30433df2cd10abc1";
tests[4].plaintext = "\xC3\xBC\xE2\x82\xAC"; // u-umlaut and euro
} else {
if (options.target_enc != ASCII &&
options.target_enc != ISO_8859_1) {
/* This avoids an if clause for every set_key */
self->methods.set_key = set_key_CP;
}
if (CP_to_Unicode[0xfc] == 0x00fc) {
tests[1].ciphertext = "ea7ab2b5c07650badab30790d0c9b63e";
tests[1].plaintext = "\xFC"; // German u-umlaut in iso-8859-1
tests[2].ciphertext = "f0a0b9f1dea0e458cec9a284ff434d44";
tests[2].plaintext = "\xFC\xFC";
tests[3].ciphertext = "d25a0b436b768777cc9a343d283dbf5a";
tests[3].plaintext = "\xFC\xFC\xFC";
tests[4].ciphertext = "719917322bf12168f8c55939e4fec8de";
tests[4].plaintext = "\xFC\xFC\xFC\xFC";
}
}
#if SIMD_COEF_32
saved_key = mem_calloc_align(sizeof(*saved_key), 64*self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_align(sizeof(*crypt_key), BINARY_SIZE*self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
buf_ptr = mem_calloc_align(sizeof(*buf_ptr), self->params.max_keys_per_crypt, sizeof(*buf_ptr));
for (i=0; i<self->params.max_keys_per_crypt; i++)
buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)];
#endif
}
static void done(void)
{
#ifdef SIMD_COEF_32
MEM_FREE(buf_ptr);
MEM_FREE(crypt_key);
MEM_FREE(saved_key);
#endif
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[32+12+1];
if (!strncmp(ciphertext, "$dynamic_29$", 12))
ciphertext += 12;
strcpy(out, "$dynamic_29$");
memcpy(&out[12], ciphertext, 32);
out[sizeof(out)-1] = 0;
strlwr(&out[12]);
return out;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos;
if (!strncmp(ciphertext, "$dynamic_29$", 12))
ciphertext += 12;
for (pos = ciphertext; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
if (!*pos && pos - ciphertext == CIPHERTEXT_LENGTH)
return 1;
else
return 0;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned long dummy;
unsigned int i[BINARY_SIZE/sizeof(unsigned int)];
} _out;
unsigned int *out = _out.i;
unsigned int i;
unsigned int temp;
ciphertext+=12;
for (i=0; i<4; i++)
{
temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])]));
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28;
temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24;
#if ARCH_LITTLE_ENDIAN
out[i]=temp;
#else
out[i]=JOHNSWAP(temp);
#endif
}
return out;
}
// ISO-8859-1 to UCS-2, directly into vector key buffer
static void set_key(char *_key, int index)
{
#ifdef SIMD_COEF_32
const unsigned char *key = (unsigned char*)_key;
unsigned int *keybuf_word = buf_ptr[index];
unsigned int len, temp2;
len = 0;
while((temp2 = *key++)) {
unsigned int temp;
if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
{
temp2 |= (temp << 16);
*keybuf_word = temp2;
}
else
{
temp2 |= (0x80 << 16);
*keybuf_word = temp2;
len++;
goto key_cleaning;
}
len += 2;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
key_cleaning:
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
/* word 14 of the MD5 block holds the message length in bits: len UTF-16 units * 16 */
((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
UTF8 *s = (UTF8*)_key;
UTF16 *d = saved_key;
while (*s)
*d++ = *s++;
*d = 0;
saved_len = (int)((char*)d - (char*)saved_key);
#else
UTF8 *s = (UTF8*)_key;
UTF8 *d = (UTF8*)saved_key;
while (*s) {
*d++ = *s++;
++d;
}
*d = 0;
saved_len = (int)((char*)d - (char*)saved_key);
#endif
#endif
}
// Legacy codepage to UCS-2, directly into vector key buffer
static void set_key_CP(char *_key, int index)
{
#ifdef SIMD_COEF_32
const unsigned char *key = (unsigned char*)_key;
unsigned int *keybuf_word = buf_ptr[index];
unsigned int len, temp2;
len = 0;
while((temp2 = *key++)) {
unsigned int temp;
temp2 = CP_to_Unicode[temp2];
if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
{
temp = CP_to_Unicode[temp];
temp2 |= (temp << 16);
*keybuf_word = temp2;
} else {
temp2 |= (0x80 << 16);
*keybuf_word = temp2;
len++;
goto key_cleaning_enc;
}
len += 2;
keybuf_word += SIMD_COEF_32;
}
*keybuf_word = 0x80;
key_cleaning_enc:
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
saved_len = enc_to_utf16((UTF16*)&saved_key,
PLAINTEXT_LENGTH + 1,
(unsigned char*)_key,
strlen(_key)) << 1;
if (saved_len < 0)
saved_len = strlen16(saved_key);
#endif
}
// UTF-8 to UCS-2, directly into vector key buffer
static void set_key_utf8(char *_key, int index)
{
#ifdef SIMD_COEF_32
const UTF8 *source = (UTF8*)_key;
unsigned int *keybuf_word = buf_ptr[index];
UTF32 chl, chh = 0x80;
unsigned int len = 0;
while (*source) {
chl = *source;
if (chl >= 0xC0) {
unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
switch (extraBytesToRead) {
case 3:
++source;
if (*source) {
chl <<= 6;
chl += *source;
} else
goto bailout;
case 2:
++source;
if (*source) {
chl <<= 6;
chl += *source;
} else
goto bailout;
case 1:
++source;
if (*source) {
chl <<= 6;
chl += *source;
} else
goto bailout;
case 0:
break;
default:
goto bailout;
}
chl -= offsetsFromUTF8[extraBytesToRead];
}
source++;
len++;
if (chl > UNI_MAX_BMP) {
if (len == PLAINTEXT_LENGTH) {
chh = 0x80;
*keybuf_word = (chh << 16) | chl;
keybuf_word += SIMD_COEF_32;
break;
}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
chl -= halfBase;
chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
len++;
} else if (*source && len < PLAINTEXT_LENGTH) {
chh = *source;
if (chh >= 0xC0) {
unsigned int extraBytesToRead =
opt_trailingBytesUTF8[chh & 0x3f];
switch (extraBytesToRead) {
case 3:
++source;
if (*source) {
chh <<= 6;
chh += *source;
} else
goto bailout;
case 2:
++source;
if (*source) {
chh <<= 6;
chh += *source;
} else
goto bailout;
case 1:
++source;
if (*source) {
chh <<= 6;
chh += *source;
} else
goto bailout;
case 0:
break;
default:
goto bailout;
}
chh -= offsetsFromUTF8[extraBytesToRead];
}
source++;
len++;
} else {
chh = 0x80;
*keybuf_word = (chh << 16) | chl;
keybuf_word += SIMD_COEF_32;
break;
}
*keybuf_word = (chh << 16) | chl;
keybuf_word += SIMD_COEF_32;
}
if (chh != 0x80 || len == 0) {
*keybuf_word = 0x80;
keybuf_word += SIMD_COEF_32;
}
bailout:
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
saved_len = utf8_to_utf16((UTF16*)&saved_key,
PLAINTEXT_LENGTH + 1,
(unsigned char*)_key,
strlen(_key)) << 1;
if (saved_len < 0)
saved_len = strlen16(saved_key);
#endif
}
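/* Standalone sketch (hypothetical helper, not part of the format) of the
* surrogate split performed above for code points beyond the BMP: subtract
* halfBase, then emit the high surrogate first and the low surrogate second. */
#if 0
static void utf16_surrogates(UTF32 cp, UTF16 *hi, UTF16 *lo)
{
cp -= 0x10000UL; /* halfBase */
*hi = (UTF16)(0xD800 + (cp >> 10)); /* UNI_SUR_HIGH_START + top 10 bits */
*lo = (UTF16)(0xDC00 + (cp & 0x3FF)); /* UNI_SUR_LOW_START + low 10 bits */
}
/* e.g. U+1F600 -> hi = 0xD83D, lo = 0xDE00 */
#endif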
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
// Get the key back from the key buffer, from UCS-2
unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)];
static UTF16 key[PLAINTEXT_LENGTH + 1 + 1]; // if only +1 we 'can' overflow. Not sure why, but ASan found it.
unsigned int md5_size=0;
unsigned int i=0;
for(; md5_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md5_size++)
{
key[md5_size] = keybuffer[i];
key[md5_size+1] = keybuffer[i] >> 16;
if (key[md5_size] == 0x80 && key[md5_size+1] == 0) {
key[md5_size] = 0;
break;
}
++md5_size;
if (key[md5_size] == 0x80 && ((keybuffer[i+SIMD_COEF_32]&0xFFFF) == 0 || md5_size == PLAINTEXT_LENGTH)) {
key[md5_size] = 0;
break;
}
}
return (char*)utf16_to_enc(key);
#else
return (char*)utf16_to_enc(saved_key);
#endif
}
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
unsigned int x,y=0;
for(;y<SIMD_PARA_MD5*BLOCK_LOOPS;y++)
for(x=0;x<SIMD_COEF_32;x++)
{
if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] )
return 1;
}
return 0;
#else
return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return (1);
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
if( ((ARCH_WORD_32*)binary)[0] != ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] )
return 0;
if( ((ARCH_WORD_32*)binary)[1] != ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32] )
return 0;
if( ((ARCH_WORD_32*)binary)[2] != ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32] )
return 0;
if( ((ARCH_WORD_32*)binary)[3] != ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32] )
return 0;
return 1;
#else
return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
#if defined(SIMD_COEF_32)
#if (BLOCK_LOOPS > 1)
int i;
// This was an experiment. It's not used (unless you bump BLOCK_LOOPS),
// because it does not scale well. We would need to parallelize set_key()
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < BLOCK_LOOPS; i++)
SIMDmd5body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*BINARY_SIZE], NULL, SSEi_MIXED_IN);
#else
SIMDmd5body(saved_key, (unsigned int*)crypt_key, NULL, SSEi_MIXED_IN);
#endif
#else
MD5_Init( &ctx );
MD5_Update(&ctx, (unsigned char*)saved_key, saved_len);
MD5_Final((unsigned char*) crypt_key, &ctx);
#endif
return count;
}
#ifdef SIMD_COEF_32
static int get_hash_0(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_0;
}
static int get_hash_1(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_1;
}
static int get_hash_2(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_2;
}
static int get_hash_3(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_3;
}
static int get_hash_4(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_4;
}
static int get_hash_5(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_5;
}
static int get_hash_6(int index)
{
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
return ((ARCH_WORD_32*)crypt_key)[x+y*SIMD_COEF_32*4] & PH_MASK_6;
}
#else
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & PH_MASK_6; }
#endif
struct fmt_main fmt_rawmd5uthick = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if (BLOCK_LOOPS > 1) && defined(SSE_MD5_PARA)
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_subassign_11.c | //------------------------------------------------------------------------------
// GB_subassign_11: C(I,J)<M,repl> += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 11: C(I,J)<M,repl> += scalar ; using S
// M: present
// Mask_comp: false
// C_replace: true
// accum: present
// A: scalar
// S: constructed
// C, M: not bitmap
#include "GB_unused.h"
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_11
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const GrB_BinaryOp accum,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ; // C must not be bitmap
GB_GET_MASK ;
GB_GET_ACCUM_SCALAR ;
GB_GET_S ;
//--------------------------------------------------------------------------
// Method 11: C(I,J)<M,repl> += scalar ; using S
//--------------------------------------------------------------------------
// Time: Optimal. All entries in M+S must be examined. All entries in S
// are modified: if M(i,j)=1 then S(i,j) is used to write to the
// corresponding entry in C. If M(i,j) is not present, or zero, then the
// entry in C is cleared (because of C_replace). If S(i,j) is not present,
// and M(i,j)=1, then the scalar is inserted into C. The only case that
// can be skipped is if neither S nor M is present. As a result, this
// method need not traverse all of IxJ. It can limit its traversal to the
// pattern of M+S.
// Method 09 and Method 11 are very similar.
//--------------------------------------------------------------------------
// Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
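// Illustrative sketch (not GraphBLAS internals): the 2-way merge skeleton
// that both phases below follow when walking the sorted index lists of
// S(:,j) and M(:,j), shown in a hypothetical standalone form.
#if 0
static void merge_demo (const int64_t *S, int64_t ns,
const int64_t *M, int64_t nm)
{
int64_t pS = 0, pM = 0 ;
while (pS < ns && pM < nm)
{
if (S [pS] < M [pM]) pS++ ; // S only: delete (C_replace)
else if (M [pM] < S [pS]) pM++ ; // M only: maybe a pending insert
else { pS++ ; pM++ ; } // both: apply accum or delete
}
// tail loops then drain whichever list still has entries
}
#endif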
if (M_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all M+S
GB_SUBASSIGN_TWO_SLICE (M, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (M_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: M is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iM_start:iM_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
int64_t pM_start = j * Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
//--------------------------------------------------------------
for (int64_t iM = iM_start ; iM < iM_end ; iM++)
{
int64_t pM = pM_start + iM ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
if (Sfound && !mij)
{
// S (i,j) is present but M (i,j) is false
// ----[C A 0] or [X A 0]-------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (!Sfound && mij)
{
// S (i,j) is not present, M (i,j) is true
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
else if (Sfound && mij)
{
// S (i,j) present and M (i,j) is true
GB_C_S_LOOKUP ;
// ----[C A 1] or [X A 1]-------------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_withaccum_C_A_1_scalar ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: M is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get S(:,j) and M(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and M(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and M (:,j) have entries
while (pS < pS_end && pM < pM_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iS < iM)
{
// S (i,j) is present but M (i,j) is not
// ----[C A 0] or [X A 0]-------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
else if (iM < iS)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (M) ;
}
else
{
// both S (i,j) and M (i,j) present
GB_C_S_LOOKUP ;
if (GB_mcast (Mx, pM, msize))
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_withaccum_C_A_1_scalar ;
}
else
{
// ----[C A 0] or [X A 0]---------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): now zombie
GB_DELETE_ENTRY ;
}
GB_NEXT (S) ;
GB_NEXT (M) ;
}
}
// while list S (:,j) has entries. List M (:,j) exhausted.
while (pS < pS_end)
{
// S (i,j) is present but M (i,j) is not
// ----[C A 0] or [X A 0]-----------------------------------
// [X A 0]: action: ( X ): still a zombie
// [C A 0]: C_repl: action: ( delete ): becomes zombie
GB_C_S_LOOKUP ;
GB_DELETE_ENTRY ;
GB_NEXT (S) ;
}
// while list M (:,j) has entries. List S (:,j) exhausted.
while (pM < pM_end)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (M) ;
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (M_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: M is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iM_start:iM_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
int64_t pM_start = j * Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iM = iM_start ; iM < iM_end ; iM++)
{
int64_t pM = pM_start + iM ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
if (!Sfound && mij)
{
// S (i,j) is not present, M (i,j) is true
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: M is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get S(:,j) and M(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and M(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and M (:,j) have entries
while (pS < pS_end && pM < pM_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iM = GBI (Mi, pM, Mvlen) ;
if (iS < iM)
{
// S (i,j) is present but M (i,j) is not
GB_NEXT (S) ;
}
else if (iM < iS)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
GB_NEXT (M) ;
}
else
{
// both S (i,j) and M (i,j) present
GB_NEXT (S) ;
GB_NEXT (M) ;
}
}
// while list M (:,j) has entries. List S (:,j) exhausted.
while (pM < pM_end)
{
// S (i,j) is not present, M (i,j) is present
if (GB_mcast (Mx, pM, msize))
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iM = GBI (Mi, pM, Mvlen) ;
int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
GB_NEXT (M) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
oldoffice_fmt_plug.c | /*
* MS Office 97-2003 cracker patch for JtR. Hacked together during May of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* Copyright (c) 2014, magnum
* Copyright (c) 2009, David Leblanc (http://offcrypto.codeplex.com/)
*
* License: Microsoft Public License (MS-PL)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_oldoffice;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oldoffice);
#else
#include <string.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "md5.h"
#include "rc4.h"
#include "stdint.h"
#include "sha.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "dyna_salt.h"
#include "memdbg.h"
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#define FORMAT_LABEL "oldoffice"
#define FORMAT_NAME "MS Office <= 2003"
#define ALGORITHM_NAME "MD5/SHA1 RC4 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 64
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_SIZE sizeof(dyna_salt*)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define CIPHERTEXT_LENGTH (TAG_LEN + 120)
#define FORMAT_TAG "$oldoffice$"
#define TAG_LEN (sizeof(FORMAT_TAG) - 1)
static struct fmt_tests oo_tests[] = {
{"$oldoffice$1*de17a7f3c3ff03a39937ba9666d6e952*2374d5b6ce7449f57c9f252f9f9b53d2*e60e1185f7aecedba262f869c0236f81", "test"},
{"$oldoffice$0*e40b4fdade5be6be329c4238e2099b8a*259590322b55f7a3c38cb96b5864e72d*2e6516bfaf981770fe6819a34998295d", "123456789012345"},
{"$oldoffice$4*163ae8c43577b94902f58d0106b29205*87deff24175c2414cb1b2abdd30855a3*4182446a527fe4648dffa792d55ae7a15edfc4fb", "Google123"},
/* Meet-in-the-middle candidate produced with oclHashcat -m9710 */
/* Real pw is "hashcat", one collision is "zvDtu!" */
{"", "zvDtu!", {"", "$oldoffice$1*d6aabb63363188b9b73a88efb9c9152e*afbbb9254764273f8f4fad9a5d82981f*6f09fd2eafc4ade522b5f2bee0eaf66d","f2ab1219ae"} },
#if PLAINTEXT_LENGTH >= 24
/* 2003-RC4-40bit-MS-Base-Crypto-1.0_myhovercraftisfullofeels_.doc */
{"$oldoffice$3*9f32522fe9bcb69b12f39d3c24b39b2f*fac8b91a8a578468ae7001df4947558f*f2e267a5bea45736b52d6d1051eca1b935eabf3a", "myhovercraftisfullofeels"},
/* Test-RC4-40bit-MS-Base-DSS_myhovercraftisfullofeels_.doc */
{"$oldoffice$3*095b777a73a10fb6bcd3e48d50f8f8c5*36902daab0d0f38f587a84b24bd40dce*25db453f79e8cbe4da1844822b88f6ce18a5edd2", "myhovercraftisfullofeels"},
/* 2003-RC4-40bit-MS-Base-DH-SChan_myhovercraftisfullofeels_.doc */
{"$oldoffice$3*284bc91cb64bc847a7a44bc7bf34fb69*1f8c589c6fcbd43c42b2bc6fff4fd12b*2bc7d8e866c9ea40526d3c0a59e2d37d8ded3550", "myhovercraftisfullofeels"},
/* Test-RC4-128bit-MS-Strong-Crypto_myhovercraftisfullofeels_.doc */
{"$oldoffice$4*a58b39c30a06832ee664c1db48d17304*986a45cc9e17e062f05ceec37ec0db17*fe0c130ef374088f3fec1979aed4d67459a6eb9a", "myhovercraftisfullofeels"},
/* 2003-RC4-40bit-MS-Base-1.0_myhovercraftisfullofeels_.xls */
{"$oldoffice$3*f426041b2eba9745d30c7949801f7d3a*888b34927e5f31e2703cc4ce86a6fd78*ff66200812fd06c1ba43ec2be9f3390addb20096", "myhovercraftisfullofeels"},
#endif
/* the following hash was extracted from Proc2356.ppt (manually + by oldoffice2john.py) */
{"$oldoffice$3*DB575DDA2E450AB3DFDF77A2E9B3D4C7*AB183C4C8B5E5DD7B9F3AF8AE5FFF31A*B63594447FAE7D4945D2DAFD113FD8C9F6191BF5", "crypto"},
{"$oldoffice$3*3fbf56a18b026e25815cbea85a16036c*216562ea03b4165b54cfaabe89d36596*91308b40297b7ce31af2e8c57c6407994b205590", "openwall"},
{NULL}
};
/* Password encoded in UCS-2 */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
/* UCS-2 password length, in octets */
static int *saved_len;
/* Last hash with this salt and plain */
static unsigned char (*mitm_key)[16];
static unsigned char (*rc4_key)[16];
static int any_cracked, *cracked;
static size_t cracked_size;
static int new_keys;
typedef struct {
dyna_salt dsalt;
int type;
unsigned char salt[16];
unsigned char verifier[16]; /* or encryptedVerifier */
unsigned char verifierHash[20]; /* or encryptedVerifierHash */
unsigned int has_mitm;
unsigned char mitm[5]; /* Meet-in-the-middle hint, if we have one */
} custom_salt;
static struct {
int ct_hash;
unsigned char mitm[10];
} mitm_catcher;
static custom_salt cs;
static custom_salt *cur_salt = &cs;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
if (options.target_enc == UTF_8)
self->params.plaintext_length = 3 * PLAINTEXT_LENGTH > 125 ?
125 : 3 * PLAINTEXT_LENGTH;
saved_key = mem_alloc(self->params.max_keys_per_crypt *
sizeof(*saved_key));
saved_len = mem_alloc(self->params.max_keys_per_crypt *
sizeof(*saved_len));
mitm_key = mem_alloc(self->params.max_keys_per_crypt *
sizeof(*mitm_key));
rc4_key = mem_alloc(self->params.max_keys_per_crypt *
sizeof(*rc4_key));
any_cracked = 0;
cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
cracked = mem_calloc(1, cracked_size);
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(rc4_key);
MEM_FREE(mitm_key);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
/* Based on ldr_cracked_hash from loader.c */
#define HASH_LOG 30
#define HASH_SIZE (1 << HASH_LOG)
static int hex_hash(char *ciphertext)
{
unsigned int hash, extra;
unsigned char *p = (unsigned char *)ciphertext;
hash = p[0] | 0x20; /* ASCII case insensitive */
if (!p[0])
goto out;
extra = p[1] | 0x20;
if (!p[1])
goto out;
p += 2;
while (*p) {
hash <<= 1; extra <<= 1;
hash += p[0] | 0x20;
if (!p[1]) break;
extra += p[1] | 0x20;
p += 2;
if (hash & 0xe0000000) {
hash ^= hash >> HASH_LOG;
extra ^= extra >> (HASH_LOG - 1);
hash &= HASH_SIZE - 1;
}
}
hash -= extra;
hash ^= extra << (HASH_LOG / 2);
hash ^= hash >> HASH_LOG;
hash &= HASH_SIZE - 1;
out:
return hash;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *ptr, *keeptr;
int type, extra;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LEN))
return 0;
if (strlen(ciphertext) > CIPHERTEXT_LENGTH)
return 0;
if (!(ctcopy = strdup(ciphertext)))
return 0;
keeptr = ctcopy;
ctcopy += TAG_LEN;
if (!(ptr = strtokm(ctcopy, "*"))) /* type */
goto error;
type = atoi(ptr);
if (type < 0 || type > 4)
goto error;
if (!(ptr = strtokm(NULL, "*"))) /* salt */
goto error;
if (hexlen(ptr, &extra) != 32 || extra)
goto error;
if (!(ptr = strtokm(NULL, "*"))) /* verifier */
goto error;
if (hexlen(ptr, &extra) != 32 || extra)
goto error;
if (!(ptr = strtokm(NULL, "*"))) /* verifier hash */
goto error;
if (type < 3 && (hexlen(ptr, &extra) != 32 || extra))
goto error;
else if (type >= 3 && (hexlen(ptr, &extra) != 40 || extra))
goto error;
/*
* Deprecated field: mitm hash (40-bit RC4). The new way to put it is in the
* uid field, like hashcat's example hash.
*/
if (type <= 3 && (ptr = strtokm(NULL, "*"))) {
if (hexlen(ptr, &extra) != 10 || extra)
goto error;
}
MEM_FREE(keeptr);
return 1;
error:
MEM_FREE(keeptr);
return 0;
}
/* uid field may contain a meet-in-the-middle hash */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
if (split_fields[0] && valid(split_fields[0], self) && split_fields[1] &&
hexlen(split_fields[1], 0) == 10) {
mitm_catcher.ct_hash = hex_hash(split_fields[0]);
memcpy(mitm_catcher.mitm, split_fields[1], 10);
return split_fields[0];
}
else if (valid(split_fields[1], self) && split_fields[2] &&
hexlen(split_fields[2], 0) == 10) {
mitm_catcher.ct_hash = hex_hash(split_fields[1]);
memcpy(mitm_catcher.mitm, split_fields[2], 10);
}
return split_fields[1];
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH];
char *p;
int extra;
strnzcpy(out, ciphertext, sizeof(out));
strlwr(out);
/* Drop legacy embedded MITM hash */
if ((p = strrchr(out, '*')) && (hexlen(&p[1], &extra) == 10 || extra))
*p = 0;
return out;
}
static void *get_salt(char *ciphertext)
{
static void *ptr;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
memset(&cs, 0, sizeof(cs));
ctcopy += TAG_LEN; /* skip over "$oldoffice$" */
p = strtokm(ctcopy, "*");
cs.type = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < 16; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
for (i = 0; i < 16; i++)
cs.verifier[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
if (cs.type < 3) {
for (i = 0; i < 16; i++)
cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
else {
for (i = 0; i < 20; i++)
cs.verifierHash[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
if ((p = strtokm(NULL, "*"))) { /* Deprecated field */
cs.has_mitm = 1;
for (i = 0; i < 5; i++)
cs.mitm[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
} else
if (hex_hash(ciphertext) == mitm_catcher.ct_hash) {
cs.has_mitm = 1;
for (i = 0; i < 5; i++)
cs.mitm[i] = atoi16[ARCH_INDEX(mitm_catcher.mitm[i * 2])] * 16
+ atoi16[ARCH_INDEX(mitm_catcher.mitm[i * 2 + 1])];
} else
cs.has_mitm = 0;
MEM_FREE(keeptr);
cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(custom_salt, type);
cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(custom_salt, type, has_mitm, 0);
cs.dsalt.salt_alloc_needs_free = 0;
ptr = mem_alloc_copy(&cs, sizeof(custom_salt), MEM_ALIGN_WORD);
return &ptr;
}
static char *source(char *source, void *binary)
{
static char Buf[CIPHERTEXT_LENGTH];
unsigned char *cpi, *cp = (unsigned char*)Buf;
int i, len;
extern volatile int bench_running;
cp += sprintf(Buf, "%s%d*", FORMAT_TAG, cur_salt->type);
cpi = cur_salt->salt;
for (i = 0; i < 16; i++) {
*cp++ = itoa16[*cpi >> 4];
*cp++ = itoa16[*cpi & 0xf];
cpi++;
}
*cp++ = '*';
cpi = cur_salt->verifier;
for (i = 0; i < 16; i++) {
*cp++ = itoa16[*cpi >> 4];
*cp++ = itoa16[*cpi & 0xf];
cpi++;
}
*cp++ = '*';
len = (cur_salt->type < 3) ? 16 : 20;
cpi = cur_salt->verifierHash;
for (i = 0; i < len; i++) {
*cp++ = itoa16[*cpi >> 4];
*cp++ = itoa16[*cpi & 0xf];
cpi++;
}
*cp = 0;
if (cur_salt->type < 4 && cur_salt->has_mitm && !bench_running) {
static int last;
char out[11];
if (last != hex_hash(Buf)) {
last = hex_hash(Buf);
cpi = cur_salt->mitm;
for (i = 0; i < 5; i++) {
out[2 * i + 0] = itoa16[*cpi >> 4];
out[2 * i + 1] = itoa16[*cpi & 0xf];
cpi++;
}
out[10] = 0;
fprintf(stderr, "MITM key: %s\n", out);
}
}
return Buf;
}
static void set_salt(void *salt)
{
if (memcmp(cur_salt->salt, (*(custom_salt**)salt)->salt, 16))
new_keys = 1;
cur_salt = *(custom_salt**)salt;
}
static int salt_compare(const void *x, const void *y)
{
int c;
c = memcmp((*(custom_salt**)x)->salt, (*(custom_salt**)y)->salt, 16);
if (c)
return c;
c = dyna_salt_cmp((void*)x, (void*)y, SALT_SIZE);
return c;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
int i;
RC4_KEY key;
if (cur_salt->type < 3) {
MD5_CTX ctx;
unsigned char pwdHash[16];
unsigned char hashBuf[21 * 16];
if (new_keys) {
unsigned char key_hash[16];
MD5_Init(&ctx);
MD5_Update(&ctx, saved_key[index], saved_len[index]);
MD5_Final(key_hash, &ctx);
for (i = 0; i < 16; i++) {
memcpy(hashBuf + i * 21, key_hash, 5);
memcpy(hashBuf + i * 21 + 5, cur_salt->salt, 16);
}
MD5_Init(&ctx);
MD5_Update(&ctx, hashBuf, 21 * 16);
MD5_Final(mitm_key[index], &ctx);
}
// Early reject if we got a hint
if (cur_salt->has_mitm &&
memcmp(mitm_key[index], cur_salt->mitm, 5))
continue;
if (new_keys) {
memcpy(hashBuf, mitm_key[index], 5);
memset(hashBuf + 5, 0, 4);
MD5_Init(&ctx);
MD5_Update(&ctx, hashBuf, 9);
MD5_Final(rc4_key[index], &ctx);
}
RC4_set_key(&key, 16, rc4_key[index]); /* rc4Key */
RC4(&key, 16, cur_salt->verifier, hashBuf); /* encryptedVerifier */
RC4(&key, 16, cur_salt->verifierHash, hashBuf + 16); /* encryptedVerifierHash */
/* hash the decrypted verifier */
MD5_Init(&ctx);
MD5_Update(&ctx, hashBuf, 16);
MD5_Final(pwdHash, &ctx);
if (!memcmp(pwdHash, hashBuf + 16, 16)) {
#ifdef _OPENMP
#pragma omp critical
#endif
{
any_cracked = cracked[index] = 1;
cur_salt->has_mitm = 1;
memcpy(cur_salt->mitm, mitm_key[index], 5);
}
}
}
else {
SHA_CTX ctx;
unsigned char H0[24];
unsigned char Hfinal[20];
unsigned char DecryptedVerifier[16];
unsigned char DecryptedVerifierHash[20];
if (new_keys) {
unsigned char key_hash[20];
SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->salt, 16);
SHA1_Update(&ctx, saved_key[index], saved_len[index]);
SHA1_Final(H0, &ctx);
memset(&H0[20], 0, 4);
SHA1_Init(&ctx);
SHA1_Update(&ctx, H0, 24);
SHA1_Final(key_hash, &ctx);
if (cur_salt->type < 4) {
memcpy(mitm_key[index], key_hash, 5);
memset(&mitm_key[index][5], 0, 11);
} else
memcpy(mitm_key[index], key_hash, 16);
}
// Early reject if we got a hint
if (cur_salt->has_mitm &&
memcmp(mitm_key[index], cur_salt->mitm, 5))
continue;
RC4_set_key(&key, 16, mitm_key[index]); /* dek */
RC4(&key, 16, cur_salt->verifier, DecryptedVerifier);
RC4(&key, 16, cur_salt->verifierHash, DecryptedVerifierHash);
SHA1_Init(&ctx);
SHA1_Update(&ctx, DecryptedVerifier, 16);
SHA1_Final(Hfinal, &ctx);
if (!memcmp(Hfinal, DecryptedVerifierHash, 16)) {
#ifdef _OPENMP
#pragma omp critical
#endif
{
any_cracked = cracked[index] = 1;
if (cur_salt->type < 4) {
cur_salt->has_mitm = 1;
memcpy(cur_salt->mitm, mitm_key[index], 5);
}
}
}
}
}
new_keys = 0;
return count;
}
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void set_key(char *key, int index)
{
/* convert key to UTF-16LE */
saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
if (saved_len[index] < 0)
saved_len[index] = strlen16(saved_key[index]);
saved_len[index] <<= 1;
new_keys = 1;
}
static char *get_key(int index)
{
return (char*)utf16_to_enc(saved_key[index]);
}
static unsigned int oo_hash_type(void *salt)
{
custom_salt *my_salt;
my_salt = *(custom_salt**)salt;
return (unsigned int) my_salt->type;
}
struct fmt_main fmt_oldoffice = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_DYNA_SALT,
{
"hash type",
},
{ FORMAT_TAG },
oo_tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
fmt_default_binary,
get_salt,
{
oo_hash_type,
},
source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
salt_compare,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
overlapping.h | #include "CSC.h"
#include "align.h"
#include "common.h"
#include "../kmercode/hash_funcs.h"
#include "../kmercode/Kmer.hpp"
#include "../kmercode/Buffer.h"
#include "../kmercode/common.h"
#include "../kmercode/fq_reader.h"
#include "../kmercode/ParallelFASTQ.h"
#include "../libcuckoo/cuckoohash_map.hh"
#ifndef __NVCC__
#include "../xavier/xavier.h"
#endif
#include <seqan/sequence.h>
#include <seqan/align.h>
#include <seqan/score.h>
#include <seqan/modifier.h>
#include <seqan/seeds.h>
#include <omp.h>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cstdlib>
#include <algorithm>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include <set>
using namespace seqan;
#ifdef __NVCC__
#include "../loganGPU/logan.cuh"
#endif
typedef Seed<Simple> TSeed;
typedef SeedSet<TSeed> TSeedSet;
#define PERCORECACHE (1024 * 1024)
#define TIMESTEP
#ifndef PRINT
#define PRINT
#endif
//#define THREADLIMIT
//#define MAX_NUM_THREAD 1
//#define OSX
//#define LINUX
//#define RAM
#ifndef __SIMD__
#define __SIMD__
#endif
#ifdef OSX
#include <mach/mach.h>
#include <mach/vm_statistics.h>
#include <mach/mach_types.h>
#include <mach/mach_init.h>
#include <mach/mach_host.h>
#endif
#ifdef LINUX
#include "sys/types.h"
#include "sys/sysinfo.h"
struct sysinfo info;
#endif
double safety_net = 1.5;
/*
Multithreaded prefix sum
Inputs:
in: an input array
size: the length of the input array "in"
nthreads: number of threads used to compute the prefix sum
Output:
return an array of size "size+1"
the memory of the output array is allocated internally
Example:
in = [2, 1, 3, 5]
out = [0, 2, 3, 6, 11]
*/
template <typename T>
T* prefixsum(T* in, int size, int nthreads)
{
std::vector<T> tsum(nthreads+1);
tsum[0] = 0;
T* out = new T[size+1];
out[0] = 0;
T* psum = &out[1];
#pragma omp parallel
{
int ithread = omp_get_thread_num();
T sum = 0;
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
sum += in[i];
psum[i] = sum;
}
tsum[ithread+1] = sum;
#pragma omp barrier
T offset = 0;
for(int i=0; i<(ithread+1); i++)
{
offset += tsum[i];
}
#pragma omp for schedule(static)
for (int i=0; i<size; i++)
{
psum[i] += offset;
}
}
return out;
}
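/*
 * Illustrative usage of prefixsum() matching the example above
 * (in = [2, 1, 3, 5] -> out = [0, 2, 3, 6, 11]); kept compiled out.
 * Assumes nthreads matches the OpenMP thread count, as the function expects.
 */
#if 0
#include <cassert>
static void prefixsum_example()
{
	int in[4] = {2, 1, 3, 5};
	int* out = prefixsum<int>(in, 4, omp_get_max_threads());
	assert(out[0] == 0 && out[4] == 11); // out has size+1 entries
	delete [] out; // allocated internally with new[]
}
#endif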
/* fix according to PAF format */
void toOriginalCoordinates(int& begpH, int& endpH, const int lenH)
{
unsigned int tmp = begpH;
begpH = lenH-endpH;
endpH = lenH-tmp;
}
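/*
 * Worked example of the coordinate flip above (compiled out): for a read of
 * length lenH = 100 aligned on the reverse-complement strand, the interval
 * [begpH, endpH) = [10, 30) maps back to [100-30, 100-10) = [70, 90) in
 * forward-strand coordinates, as the PAF format expects.
 */
#if 0
static void toOriginalCoordinates_example()
{
	int begpH = 10, endpH = 30;
	toOriginalCoordinates(begpH, endpH, 100);
	// begpH == 70, endpH == 90 here
}
#endif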
// estimate the number of floating point operations of SpGEMM
template <typename IT, typename NT>
IT* estimateFLOP(const CSC<IT,NT> & A, const CSC<IT,NT> & B, bool lowtriout)
{
if(A.isEmpty() || B.isEmpty())
{
return NULL;
}
IT* colflopC = new IT[B.cols]; // nnz in every column of C
#pragma omp parallel for
for(IT i=0; i< B.cols; ++i)
{
colflopC[i] = 0;
}
#pragma omp parallel for
for(IT i=0; i < B.cols; ++i)
{
// size_t nnzcolB = B.colptr[i+1] - B.colptr[i]; // nnz in the current column of B
int myThread = omp_get_thread_num();
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
IT nnzcolA = 0;
if(lowtriout)
{
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
// i is the column_id of the output and A.rowids[k] is the row_id of the output
if(i < A.rowids[k])
{
++nnzcolA;
}
}
}
else
{
nnzcolA = A.colptr[col2fetch+1]- A.colptr[col2fetch]; // nonzero count of that column of A
}
colflopC[i] += nnzcolA;
}
}
return colflopC;
}
// estimate space for result of SpGEMM with Hash
template <typename IT, typename NT>
IT* estimateNNZ_Hash(const CSC<IT,NT>& A, const CSC<IT,NT>& B, const IT* flopC, bool lowtriout)
{
if(A.isEmpty() || B.isEmpty())
{
return NULL;
}
IT* colnnzC = new IT[B.cols]; // nnz in every column of C
#pragma omp parallel for
for(IT i=0; i< B.cols; ++i)
{
colnnzC[i] = 0;
}
#pragma omp parallel for
for(IT i=0; i < B.cols; ++i) // for each column of B
{
// size_t nnzcolB = B.colptr[i+1] - B.colptr[i]; //nnz in the current column of B
int myThread = omp_get_thread_num();
// Hash
const unsigned int minHashTableSize = 16;
const unsigned int hashScale = 107;
// Initialize hash tables
size_t ht_size = minHashTableSize;
while(ht_size < flopC[i]) //ht_size is set as 2^n
{
ht_size <<= 1;
}
std::vector<IT> globalHashVec(ht_size);
for(size_t j=0; j < ht_size; ++j)
{
globalHashVec[j] = -1;
}
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
IT key = A.rowids[k];
if(lowtriout && i >= key) // i is the column_id of the output and key is the row_id of the output
continue;
IT hash = (key*hashScale) & (ht_size-1);
while (1) //hash probing
{
if (globalHashVec[hash] == key) //key is found in hash table
{
break;
}
else if (globalHashVec[hash] == -1) //key is not registered yet
{
globalHashVec[hash] = key;
colnnzC[i] ++;
break;
}
else //key is not found
{
hash = (hash+1) & (ht_size-1); // don't exit the while loop yet
}
}
}
}
}
return colnnzC;
}
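/*
 * The distinct-key count above uses open addressing with linear probing over
 * a power-of-two table (hash = key*107 masked to the table size, -1 marking
 * an empty slot). A minimal standalone sketch of the same scheme, compiled
 * out and assuming non-negative keys:
 */
#if 0
#include <vector>
static int count_distinct(const std::vector<long long>& keys)
{
	size_t ht_size = 16; // grown to a power of two >= keys.size()
	while (ht_size < keys.size()) ht_size <<= 1;
	std::vector<long long> table(ht_size, -1); // -1 marks an empty slot
	int distinct = 0;
	for (long long key : keys)
	{
		size_t h = (size_t)(key * 107) & (ht_size - 1);
		while (table[h] != -1 && table[h] != key)
			h = (h + 1) & (ht_size - 1); // probe the next slot
		if (table[h] == -1) { table[h] = key; ++distinct; }
	}
	return distinct;
}
#endif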
//! Hash based column-by-column spgemm algorithm. Based on earlier code by Buluc, Azad, and Nagasaka
//! If lowtriout = true, then only creates the strictly lower triangular part: no diagonal and no upper triangular entries
//! input matrices do not need to have sorted rowids within each column
template <typename IT, typename NT, typename MultiplyOperation, typename AddOperation, typename FT>
void LocalSpGEMM(IT & start, IT & end, const CSC<IT,NT> & A, const CSC<IT,NT> & B, MultiplyOperation multop, AddOperation addop,
vector<IT> * RowIdsofC, vector<FT> * ValuesofC, IT* colptrC, bool lowtriout)
{
#pragma omp parallel for
for(IT i = start; i < end; ++i) // for bcols of B (one block)
{
const IT minHashTableSize = 16;
const IT hashScale = 107;
size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
IT ht_size = minHashTableSize;
while(ht_size < nnzcolC) //ht_size is set as 2^n
{
ht_size <<= 1;
}
std::vector< std::pair<IT,FT>> globalHashVec(ht_size);
// Initialize hash tables
for(IT j=0; j < ht_size; ++j)
{
globalHashVec[j].first = -1;
}
for (IT j = B.colptr[i]; j < B.colptr[i+1]; ++j) // all nonzeros in that column of B
{
IT col2fetch = B.rowids[j]; // find the row index of that nonzero in B, which is the column to fetch in A
NT valueofB = B.values[j];
for(IT k = A.colptr[col2fetch]; k < A.colptr[col2fetch+1]; ++k) // all nonzeros in this column of A
{
IT key = A.rowids[k];
// i is the column_id of the output and key is the row_id of the output
if(lowtriout && i >= key)
continue;
// GG: modified to get read ids needed to compute overlap length
FT result = multop(A.values[k], valueofB, key, i);
IT hash = (key*hashScale) & (ht_size-1);
while (1) //hash probing
{
if (globalHashVec[hash].first == key) //key is found in hash table
{ // GG: addop temporarily modified; remove key, i after testing
globalHashVec[hash].second = addop(result, globalHashVec[hash].second, key, i);
break;
}
else if (globalHashVec[hash].first == -1) //key is not registered yet
{
globalHashVec[hash].first = key;
globalHashVec[hash].second = result;
break;
}
else //key is not found
{
hash = (hash+1) & (ht_size-1); // don't exit the while loop yet
}
}
}
}
// gather non-zero elements from hash table (and then sort them by row indices if needed)
IT index = 0;
for (IT j=0; j < ht_size; ++j)
{
if (globalHashVec[j].first != -1)
{
globalHashVec[index++] = globalHashVec[j];
}
}
#ifdef SORTCOLS
std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NT>);
#endif
RowIdsofC[i-start].resize(index);
ValuesofC[i-start].resize(index);
for (IT j=0; j< index; ++j)
{
RowIdsofC[i-start][j] = globalHashVec[j].first;
ValuesofC[i-start][j] = globalHashVec[j].second;
}
}
}
double estimateMemory(const BELLApars & b_pars)
{
double free_memory;
if (b_pars.userDefMem)
{
free_memory = b_pars.totalMemory * 1024 * 1024;
}
else
{
#if defined (OSX) // OSX-based memory consumption implementation
vm_size_t page_size;
mach_port_t mach_port;
mach_msg_type_number_t count;
vm_statistics64_data_t vm_stats;
mach_port = mach_host_self();
count = sizeof(vm_stats) / sizeof(natural_t);
if (KERN_SUCCESS == host_page_size(mach_port, &page_size) &&
KERN_SUCCESS == host_statistics64(mach_port, HOST_VM_INFO64,
(host_info64_t)&vm_stats, &count))
{
free_memory = (double) vm_stats.free_count * (double)page_size;
}
#elif defined (LINUX) // LINUX-based memory consumption implementation
if(sysinfo(&info) != 0)
{
return 0.0; // sysinfo failed: no estimate available
}
free_memory = info.freeram * info.mem_unit;
free_memory += info.freeswap * info.mem_unit;
free_memory += info.bufferram * info.mem_unit;
#else
free_memory = b_pars.totalMemory * 1024 * 1024; // memory is neither user-supplied nor can be estimated, so use BELLA's default
#endif
}
return free_memory;
}
#ifndef __NVCC__
// ======================================= //
// CPU Functions //
// ======================================= //
#ifdef __SIMD__
void PostAlignDecision(const xavierResult& maxExtScore,
#else
void PostAlignDecision(const seqAnResult& maxExtScore,
#endif
const readType_& read1, const readType_& read2,
const BELLApars& b_pars, double ratiophi, int count, stringstream& myBatch, size_t& outputted,
size_t& numBasesAlignedTrue, size_t& numBasesAlignedFalse, bool& passed, int const& matches)
{
auto maxseed = maxExtScore.seed; // returns a seqan::Seed object
// {begin/end}Position{V/H}: returns the begin/end position of the seed in the query (vertical/horizontal direction)
// these four return seqan::Tposition objects
#ifdef __SIMD__
int begpV = getBeginPositionV(maxseed);
int endpV = getEndPositionV(maxseed);
int begpH = getBeginPositionH(maxseed);
int endpH = getEndPositionH(maxseed);
#else
int begpV = beginPositionV(maxseed);
int endpV = endPositionV(maxseed);
int begpH = beginPositionH(maxseed);
int endpH = endPositionH(maxseed);
#endif
// Get references for better naming
const string& seq1 = read1.seq; // H
const string& seq2 = read2.seq; // V
unsigned short int read1len = seq1.length();
unsigned short int read2len = seq2.length();
unsigned short int overlapLenV = endpV - begpV;
unsigned short int overlapLenH = endpH - begpH;
unsigned short int minLeft = min(begpV, begpH);
unsigned short int minRight = min(read2len - endpV, read1len - endpH);
unsigned short int ov = minLeft + minRight + (overlapLenV + overlapLenH) / 2;
unsigned short int normLen = max(overlapLenV, overlapLenH);
unsigned short int minLen = min(overlapLenV, overlapLenH);
if(b_pars.fixedThreshold == -1)
{
float mythreshold = (1 - b_pars.deltaChernoff) * (ratiophi * (float)ov);
if((float)maxExtScore.score >= mythreshold)
{
passed = true;
}
}
else if(maxExtScore.score >= b_pars.fixedThreshold) // GG: this is only useful for debugging
{
passed = true;
}
if(passed)
{
if(!b_pars.outputPaf) // BELLA output format
{
myBatch << read2.nametag << '\t' << read1.nametag << '\t' << count << '\t' << maxExtScore.score << '\t' << ov << '\t' << maxExtScore.strand << '\t' <<
begpV << '\t' << endpV << '\t' << read2len << '\t' << begpH << '\t' << endpH << '\t' << read1len << endl;
}
else
{
std::string pafstrand; // maxExtScore not modifiable
unsigned short int mapq = 255; // mapping quality (0-255; 255 for missing)
if(maxExtScore.strand == "n") pafstrand = "+";
else pafstrand = "-";
if(pafstrand == "-")
toOriginalCoordinates(begpH, endpH, read1len);
// PAF format is the output format used by minimap/minimap2: https://github.com/lh3/miniasm/blob/master/PAF.md
myBatch << read2.nametag << '\t' << read2len << '\t' << begpV << '\t' << endpV << '\t' << pafstrand << '\t' <<
read1.nametag << '\t' << read1len << '\t' << begpH << '\t' << endpH << '\t' << maxExtScore.score << '\t' << ov << '\t' << mapq << endl;
}
++outputted;
numBasesAlignedTrue += (endpV-begpV);
}
else
{
numBasesAlignedFalse += (endpV-begpV);
}
}
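/*
 * Worked example of the adaptive threshold above (compiled out). With
 * illustrative values ratiophi = 1.0 and deltaChernoff = 0.1, an estimated
 * overlap ov = 2000 passes only if the alignment score reaches
 * (1 - 0.1) * (1.0 * 2000) = 1800.
 */
#if 0
static bool passesAdaptiveThreshold(float score, float ov, float ratiophi, float deltaChernoff)
{
	float mythreshold = (1 - deltaChernoff) * (ratiophi * ov);
	return score >= mythreshold; // e.g. 1850 >= 1800 -> true
}
#endif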
template <typename IT, typename FT>
auto RunPairWiseAlignments(IT start, IT end, IT offset, IT * colptrC, IT * rowids, FT * values, const readVector_& reads,
char* filename, const BELLApars& b_pars, const double& ratiophi)
{
size_t alignedpairs = 0;
size_t alignedbases = 0;
size_t totalreadlen = 0;
size_t totaloutputt = 0;
size_t totsuccbases = 0;
size_t totfailbases = 0;
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
vector<stringstream> vss(numThreads); // any chance of false sharing here? depends on how stringstream is implemented. optimize later if needed...
#pragma omp parallel for schedule(dynamic)
for(IT j = start; j < end; ++j) // for (end-start) columns of A^T A (one block)
{
size_t numAlignmentsThread = 0;
size_t numBasesAlignedThread = 0;
size_t readLengthsThread = 0;
size_t numBasesAlignedTrue = 0;
size_t numBasesAlignedFalse = 0;
size_t outputted = 0;
int ithread = omp_get_thread_num();
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i) // all nonzeros in that column of A^T A
{
unsigned int rid = rowids[i-offset]; // row id
unsigned int cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readability
const string& seq2 = reads[cid].seq; // get reference for readability
unsigned short int seq1len = seq1.length();
unsigned short int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
if(!b_pars.skipAlignment) // fix -z to not print
{
numAlignmentsThread++;
readLengthsThread = readLengthsThread + seq1len + seq2len;
#ifdef __SIMD__
xavierResult maxExtScore;
#else
seqAnResult maxExtScore;
#endif
bool passed = false;
// GG: number of matching k-mers in the majority-voted bin
unsigned short int matches = val->chain();
unsigned short int overlap;
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
// GG: nucleotide alignment
#ifdef __SIMD__
maxExtScore = xavierAlign(seq1, seq2, seq1len, i, j, b_pars.xDrop, b_pars.kmerSize);
#else
maxExtScore = alignSeqAn(seq1, seq2, seq1len, i, j, b_pars.xDrop, b_pars.kmerSize);
#endif
PostAlignDecision(maxExtScore, reads[rid], reads[cid], b_pars, ratiophi, val->count, vss[ithread],
outputted, numBasesAlignedTrue, numBasesAlignedFalse, passed, matches);
#ifdef __SIMD__
numBasesAlignedThread += getEndPositionV(maxExtScore.seed)-getBeginPositionV(maxExtScore.seed);
#else
numBasesAlignedThread += endPositionV(maxExtScore.seed)-beginPositionV(maxExtScore.seed);
#endif
}
else // skipAlignment == true: skip the alignment and just save some info on the pair to file
{
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
int overlap = overlapop(reads[rid].seq, reads[cid].seq, i, j, b_pars.kmerSize);
vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
overlap << '\t' << seq2len << '\t' << seq1len << endl;
++outputted;
// vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
// seq2len << '\t' << seq1len << std::endl;
// ++outputted;
}
} // all nonzeros in that column of A^T A
#pragma omp critical
{
alignedpairs += numAlignmentsThread;
alignedbases += numBasesAlignedThread;
totalreadlen += readLengthsThread;
totaloutputt += outputted;
totsuccbases += numBasesAlignedTrue;
totfailbases += numBasesAlignedFalse;
}
} // all columns from start...end (omp for loop)
double outputting = omp_get_wtime();
int64_t* bytes = new int64_t[numThreads];
for(int i = 0; i < numThreads; ++i)
{
vss[i].seekg(0, ios::end);
bytes[i] = vss[i].tellg();
vss[i].seekg(0, ios::beg);
}
int64_t bytestotal = std::accumulate(bytes, bytes+numThreads, static_cast<int64_t>(0));
std::ofstream ofs(filename, std::ios::binary | std::ios::app);
std::string str1 = std::to_string((double)bytestotal/(double)(1024 * 1024));
std::string str2 = " MB";
std::string OutputSize = str1 + str2;
printLog(OutputSize);
ofs.seekp(bytestotal - 1);
ofs.write("", 1); // this will likely create a sparse file so the actual disks won't spin yet
ofs.close();
#pragma omp parallel
{
int ithread = omp_get_thread_num();
FILE *ffinal;
if ((ffinal = fopen(filename, "rb+")) == NULL) // then everyone fills it
{
fprintf(stderr, "File %s failed to open at thread %d\n", filename, ithread);
}
int64_t bytesuntil = std::accumulate(bytes, bytes+ithread, static_cast<int64_t>(0));
fseek (ffinal, bytesuntil, SEEK_SET);
std::string text = vss[ithread].str();
fwrite(text.c_str(),1, bytes[ithread] ,ffinal);
fflush(ffinal);
fclose(ffinal);
}
delete [] bytes;
double timeoutputt = omp_get_wtime()-outputting;
return make_tuple(alignedpairs, alignedbases, totalreadlen, totaloutputt, totsuccbases, totfailbases, timeoutputt);
}
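/*
 * The output stage above first extends the file to its final size (seekp
 * plus a one-byte write, typically producing a sparse file), then lets every
 * thread fopen/fseek/fwrite its own disjoint byte range. A minimal sketch of
 * the same pattern with two fixed buffers (compiled out; filename is a
 * placeholder):
 */
#if 0
static void parallel_disjoint_write(const char* filename)
{
	const char* part[2] = {"hello ", "world\n"};
	long off[3] = {0, 6, 12}; // exclusive prefix sum of part lengths
	std::ofstream ofs(filename, std::ios::binary);
	ofs.seekp(off[2] - 1);
	ofs.write("", 1); // reserve the full size up front
	ofs.close();
	#pragma omp parallel for
	for (int t = 0; t < 2; ++t) // thread t owns bytes [off[t], off[t+1])
	{
		FILE* f = fopen(filename, "rb+");
		fseek(f, off[t], SEEK_SET);
		fwrite(part[t], 1, off[t + 1] - off[t], f);
		fclose(f);
	}
}
#endif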
/**
* Sparse multithreaded GEMM.
**/
template <typename IT, typename NT, typename FT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMM(const CSC<IT,NT>& A, const CSC<IT,NT>& B, MultiplyOperation multop, AddOperation addop, const readVector_& reads,
FT& getvaluetype, char* filename, const BELLApars& b_pars, const double& ratiophi)
{
double free_memory = estimateMemory(b_pars);
std::string str1 = std::to_string(free_memory / (1024 * 1024));
std::string str2 = " MB";
std::string AvailableRAM = str1 + str2;
printLog(AvailableRAM);
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* flopC = estimateFLOP(A, B, true);
IT* flopptr = prefixsum<IT>(flopC, B.cols, numThreads);
IT flops = flopptr[B.cols];
std::string FLOPs = std::to_string(flops);
printLog(FLOPs);
IT* colnnzC = estimateNNZ_Hash(A, B, flopC, true);
IT* colptrC = prefixsum<IT>(colnnzC, B.cols, numThreads); // colptrC[i] = rolling sum of nonzeros in C[1...i]
delete [] colnnzC;
delete [] flopptr;
delete [] flopC;
IT nnzc = colptrC[B.cols];
double compression_ratio = (double)flops / nnzc;
uint64_t required_memory = safety_net * nnzc * (sizeof(FT)+sizeof(IT)); // required memory to form the output
int stages = std::ceil((double) required_memory/ free_memory); // form output in stages
uint64_t nnzcperstage = free_memory / (safety_net * (sizeof(FT)+sizeof(IT)));
std::string nnzOutput = std::to_string(nnzc);
std::string FreeMemory = std::to_string(free_memory / (1024 * 1024)) + " MB";
std::string CompressionRatio = std::to_string(compression_ratio);
std::string RequiredMemory = std::to_string(required_memory / (1024 * 1024)) + " MB";
std::string RequiredStages = std::to_string(stages);
printLog(nnzOutput);
printLog(CompressionRatio);
printLog(FreeMemory);
printLog(RequiredMemory);
printLog(RequiredStages);
IT * colStart = new IT[stages+1]; // one array is enough to set stage boundaries
colStart[0] = 0;
for(int i = 1; i < stages; ++i) // colsPerStage is no longer fixed (helps with potential load imbalance)
{
// std::upper_bound returns an iterator pointing to the first element
// in the range [first, last) that is greater than value, or last if no such element is found
auto upper = std::upper_bound(colptrC, colptrC+B.cols+1, i*nnzcperstage );
colStart[i] = upper - colptrC - 1; // we don't want the element that exceeds our budget, we want the one just before that
}
colStart[stages] = B.cols;
for(int b = 0; b < stages; ++b)
{
double alnlenl = omp_get_wtime();
vector<IT> * RowIdsofC = new vector<IT>[colStart[b+1]-colStart[b]]; // row ids for each column of C (bunch of cols)
vector<FT> * ValuesofC = new vector<FT>[colStart[b+1]-colStart[b]]; // values for each column of C (bunch of cols)
LocalSpGEMM(colStart[b], colStart[b+1], A, B, multop, addop, RowIdsofC, ValuesofC, colptrC, true);
double alnlen2 = omp_get_wtime();
std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
printLog(ColumnsRange);
std::string OverlapTime = std::to_string(alnlen2-alnlenl) + " seconds";
printLog(OverlapTime);
IT endnz = colptrC[colStart[b+1]];
IT begnz = colptrC[colStart[b]];
IT * rowids = new IT[endnz-begnz];
FT * values = new FT[endnz-begnz];
for(IT i=colStart[b]; i<colStart[b+1]; ++i) // combine step
{
IT loccol = i-colStart[b];
IT locnz = colptrC[i]-begnz;
copy(RowIdsofC[loccol].begin(), RowIdsofC[loccol].end(), rowids + locnz);
copy(ValuesofC[loccol].begin(), ValuesofC[loccol].end(), values + locnz);
}
delete [] RowIdsofC;
delete [] ValuesofC;
// GG: all parallelism moved to the GPU; we can do better
tuple<size_t, size_t, size_t, size_t, size_t, size_t, double> alignstats; // (alignedpairs, alignedbases, totalreadlen, outputted, alignedtrue, alignedfalse, timeoutputt)
alignstats = RunPairWiseAlignments(colStart[b], colStart[b+1], begnz, colptrC, rowids, values, reads, filename, b_pars, ratiophi);
if(!b_pars.skipAlignment)
{
double elapsed = omp_get_wtime()-alnlen2;
double aligntime = elapsed-get<6>(alignstats); // subtracting outputting time
std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
printLog(ColumnsRange);
std::string AlignmentTime = std::to_string(aligntime) + " seconds";
printLog(AlignmentTime);
std::string AlignmentRate = std::to_string((int)(static_cast<double>(get<1>(alignstats))/aligntime)) + " bases/second";
printLog(AlignmentRate);
std::string AverageReadLength = std::to_string((int)(static_cast<double>(get<2>(alignstats))/(2*get<0>(alignstats))));
printLog(AverageReadLength);
std::string PairsAligned = std::to_string(get<0>(alignstats));
printLog(PairsAligned);
std::string AverageLengthSuccessfulAlignment = std::to_string((int)(static_cast<double>(get<4>(alignstats))/get<3>(alignstats))) + " bps";
printLog(AverageLengthSuccessfulAlignment);
std::string AverageLengthFailedAlignment = std::to_string((int)(static_cast<double>(get<5>(alignstats)) / (get<0>(alignstats) - get<3>(alignstats)))) + " bps";
printLog(AverageLengthFailedAlignment);
}
int LinesOutputted = get<3>(alignstats);
printLog(LinesOutputted);
std::string OutputtingTime = std::to_string(get<6>(alignstats)) + " seconds";
printLog(OutputtingTime);
delete [] rowids;
delete [] values;
} // for(int b = 0; b < stages; ++b)
delete [] colptrC;
delete [] colStart;
}
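/*
 * Minimal sketch of the stage-boundary computation used above (compiled
 * out). Given the rolling nonzero counts in colptrC and a per-stage nnz
 * budget, std::upper_bound finds the first column whose prefix exceeds the
 * budget; the stage boundary is the column just before it.
 */
#if 0
static void stage_boundary_example()
{
	int colptr[6] = {0, 4, 9, 15, 22, 30}; // prefix nnz over 5 columns
	int budget = 16; // nnz allowed per stage
	int* upper = std::upper_bound(colptr, colptr + 6, budget);
	int boundary = (int)(upper - colptr) - 1; // == 3: stage 0 covers columns [0, 3)
	(void)boundary;
}
#endif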
#else // #ifndef __NVCC__
// ======================================= //
// GPU Functions //
// ======================================= //
void PostAlignDecisionGPU(const loganResult& maxExtScore, const readType_& read1, const readType_& read2,
const BELLApars& b_pars, double ratiophi, int count, stringstream& myBatch, size_t& outputted,
size_t& numBasesAlignedTrue, size_t& numBasesAlignedFalse, bool& passed)
{
// returns a Logan::Seed object
SeedL maxseed = maxExtScore.seed;
// {begin/end}Position{V/H}: returns the begin/end position of the seed in the query (vertical/horizontal direction)
// these four return seed positions
auto begpV = getBeginPositionV(maxseed);
auto endpV = getEndPositionV(maxseed);
auto begpH = getBeginPositionH(maxseed);
auto endpH = getEndPositionH(maxseed);
// get references for better naming
const string& seq1 = read1.seq; // H
const string& seq2 = read2.seq; // V
unsigned short int read1len = seq1.length();
unsigned short int read2len = seq2.length();
// GG: divergence estimation
unsigned short int overlapLenV = endpV - begpV;
unsigned short int overlapLenH = endpH - begpH;
unsigned short int minLeft = min(begpV, begpH);
unsigned short int minRight = min(read2len - endpV, read1len - endpH);
unsigned short int ov = minLeft + minRight + (overlapLenV + overlapLenH) / 2;
unsigned short int normLen = max(overlapLenV, overlapLenH);
unsigned short int minLen = min(overlapLenV, overlapLenH);
if(b_pars.fixedThreshold == -1)
{
double mythreshold = (1 - b_pars.deltaChernoff) * (ratiophi * (double)ov);
if((double)maxExtScore.score >= mythreshold)
{
passed = true;
}
}
else if(maxExtScore.score >= b_pars.fixedThreshold) // GG: this is only useful for debugging
{
passed = true;
}
if(passed)
{
if(!b_pars.outputPaf) // BELLA output format
{
myBatch << read2.nametag << '\t' << read1.nametag << '\t' << count << '\t' << maxExtScore.score << '\t' << ov << '\t' << maxExtScore.strand << '\t' <<
begpV << '\t' << endpV << '\t' << read2len << '\t' << begpH << '\t' << endpH << '\t' << read1len << endl;
}
else
{
std::string pafstrand; // maxExtScore not modifiable
unsigned short int mapq = 255; // mapping quality (0-255; 255 for missing)
if(maxExtScore.strand == "n") pafstrand = "+";
else pafstrand = "-";
if(pafstrand == "-")
toOriginalCoordinates(begpH, endpH, read1len);
// PAF format is the output format used by minimap/minimap2: https://github.com/lh3/miniasm/blob/master/PAF.md
myBatch << read2.nametag << '\t' << read2len << '\t' << begpV << '\t' << endpV << '\t' << pafstrand << '\t' <<
read1.nametag << '\t' << read1len << '\t' << begpH << '\t' << endpH << '\t' << maxExtScore.score << '\t' << ov << '\t' << mapq << endl;
}
++outputted;
numBasesAlignedTrue += (endpV-begpV);
}
else
{
numBasesAlignedFalse += (endpV-begpV);
}
}
// (unsigned int, unsigned int, unsigned int, unsigned int *, unsigned int *, spmatPtr_ *,
// const readVector_, const BELLApars, char *, double)
template <typename IT, typename FT>
std::tuple<uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, double>
RunPairWiseAlignmentsGPU(IT start, IT end, IT offset, IT * colptrC, IT * rowids, FT * values, const readVector_& reads,
const BELLApars& b_pars, char* filename, double ratiophi)
{
stringstream ss;
vector<string> seq1s;
vector<string> seq2s;
vector<SeedL> seeds;
vector<loganResult> maxExtScoreL;
uint64_t outputted = 0;
int count = 0;
//#pragma omp parallel for schedule(dynamic) // keep the order for the post evaluation code
for(IT j = start; j < end; ++j) // accumulate sequences for GPU batch alignment
{
count++;
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i)
{
unsigned int rid = rowids[i-offset]; // row id
unsigned int cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readability
const string& seq2 = reads[cid].seq; // get reference for readability
unsigned short int seq1len = seq1.length();
unsigned short int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
if(!b_pars.skipAlignment) // fix -z to not print
{
loganResult localRes;
// GG: number of matching k-mers in the majority-voted bin
unsigned short int matches = val->chain();
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
std::string strand = "n";
SeedL seed(i, j, i + b_pars.kmerSize, j + b_pars.kmerSize);
std::string seedH = seq1.substr(getBeginPositionH(seed), b_pars.kmerSize);
std::string seedV = seq2.substr(getBeginPositionV(seed), b_pars.kmerSize);
std::string seedHcpy = reversecomplement(seedH);
std::string cpyseq1(seq1);
if(seedHcpy == seedV)
{
strand = "c";
std::reverse(std::begin(cpyseq1), std::end(cpyseq1));
std::transform(std::begin(cpyseq1), std::end(cpyseq1), std::begin(cpyseq1), complementbase);
setBeginPositionH(seed, seq1len - i - b_pars.kmerSize);
setBeginPositionV(seed, j);
setEndPositionH(seed, seq1len - i);
setEndPositionV(seed, j + b_pars.kmerSize);
}
localRes.strand = strand;
seeds.push_back(seed);
seq2s.push_back(seq2);
seq1s.push_back(cpyseq1);
maxExtScoreL.push_back(localRes);
}
else // skipAlignment == true: skip the alignment and just save some info on the pair to file
{
pair<int, int> kmer = val->choose();
int i = kmer.first, j = kmer.second;
int overlap = overlapop(reads[rid].seq, reads[cid].seq, i, j, b_pars.kmerSize);
// vss[ithread] << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
// seq2len << '\t' << seq1len << endl;
ss << reads[cid].nametag << '\t' << reads[rid].nametag << '\t' << val->count << '\t' <<
overlap << '\t' << seq2len << '\t' << seq1len << endl;
++outputted;
}
}
}
uint64_t alignedpairs = 0;
uint64_t alignedbases = 0;
uint64_t totalreadlen = 0;
uint64_t totaloutputt = 0;
uint64_t totsuccbases = 0;
uint64_t totfailbases = 0;
if(!b_pars.skipAlignment) // fix -z to not print
{
std::string AlignmentGPU = "Started";
printLog(AlignmentGPU);
alignLogan(seq1s, seq2s, seeds, b_pars, maxExtScoreL);
AlignmentGPU = "Completed";
printLog(AlignmentGPU);
uint64_t idx = 0;
// no parallelism to keep same order of pairs in alignment
for(IT j = start; j < end; ++j) // for (end-start) columns of A^T A (one block)
{
// uint64_t numAlignmentsThread = 0;
// uint64_t numBasesAlignedThread = 0;
// uint64_t readLengthsThread = 0;
// uint64_t numBasesAlignedTrue = 0;
// uint64_t numBasesAlignedFalse = 0;
for (IT i = colptrC[j]; i < colptrC[j+1]; ++i) // all nonzeros in that column of A^T A
{
unsigned int rid = rowids[i-offset]; // row id
unsigned int cid = j; // column id
const string& seq1 = reads[rid].seq; // get reference for readability
const string& seq2 = reads[cid].seq; // get reference for readability
unsigned short int seq1len = seq1.length();
unsigned short int seq2len = seq2.length();
spmatPtr_ val = values[i-offset];
alignedpairs++;
totalreadlen = totalreadlen + seq1len + seq2len;
// readLengthsThread = readLengthsThread + seq1len + seq2len;
bool passed = false;
loganResult maxExtScore = maxExtScoreL[idx];
PostAlignDecisionGPU(maxExtScore, reads[rid], reads[cid], b_pars, ratiophi, val->count,
ss, totaloutputt, totsuccbases, totfailbases, passed);
idx++; // pairs aligned
// numBasesAlignedThread += getEndPositionV(maxExtScore.seed) - getBeginPositionV(maxExtScore.seed);
alignedbases += getEndPositionV(maxExtScore.seed) - getBeginPositionV(maxExtScore.seed);
} // all nonzeros in that column of A^T A
// GG: no need for multithreaded style here
// alignedpairs += numAlignmentsThread;
// alignedbases += numBasesAlignedThread;
// totalreadlen += readLengthsThread;
// totaloutputt += outputted;
// totsuccbases += numBasesAlignedTrue;
// totfailbases += numBasesAlignedFalse;
// printLog(totsuccbases);
// printLog(totfailbases);
} // all columns from start...end (omp for loop)
}
double outputting = omp_get_wtime();
int64_t bytestotal;
ss.seekg(0, ios::end);
bytestotal = ss.tellg();
ss.seekg(0, ios::beg);
std::ofstream ofs(filename, std::ios::binary | std::ios::app);
std::string str1 = std::to_string((double)bytestotal/(double)(1024 * 1024));
std::string str2 = " MB";
std::string OutputSize = str1 + str2;
printLog(OutputSize);
ofs.seekp(bytestotal - 1);
ofs.write("", 1); // this will likely create a sparse file so the actual disks won't spin yet
ofs.close();
FILE *ffinal;
if ((ffinal = fopen(filename, "rb+")) == NULL) // reopen and fill the preallocated file
{
fprintf(stderr, "File %s failed to open\n", filename);
}
// int64_t bytesuntil = std::accumulate(bytes, bytes+ithread, static_cast<int64_t>(0));
fseek (ffinal , bytestotal , SEEK_SET);
// std::string text = vss[ithread].str();
std::string text = ss.str();
fwrite(text.c_str(), 1, bytestotal, ffinal);
fflush(ffinal);
fclose(ffinal);
double timeoutputt = omp_get_wtime()-outputting;
return std::make_tuple(alignedpairs, alignedbases, totalreadlen, totaloutputt, totsuccbases, totfailbases, timeoutputt);
}
/**
* Sparse multithreaded GEMM.
**/
template <typename IT, typename NT, typename FT, typename MultiplyOperation, typename AddOperation>
void HashSpGEMMGPU(const CSC<IT,NT> & A, const CSC<IT,NT> & B, MultiplyOperation multop, AddOperation addop, const readVector_& reads,
FT& getvaluetype, char* filename, const BELLApars& b_pars, const double& ratiophi)
{
double free_memory = estimateMemory(b_pars);
std::string str1 = std::to_string(free_memory / (1024 * 1024));
std::string str2 = " MB";
std::string AvailableRAM = str1 + str2;
printLog(AvailableRAM);
int numThreads = 1;
#pragma omp parallel
{
numThreads = omp_get_num_threads();
}
IT* flopC = estimateFLOP(A, B, true);
IT* flopptr = prefixsum<IT>(flopC, B.cols, numThreads);
IT flops = flopptr[B.cols];
std::string FLOPs = std::to_string(flops);
printLog(FLOPs);
IT* colnnzC = estimateNNZ_Hash(A, B, flopC, true);
IT* colptrC = prefixsum<IT>(colnnzC, B.cols, numThreads); // colptrC[i] = rolling sum of nonzeros in C[1...i]
delete [] colnnzC;
delete [] flopptr;
delete [] flopC;
IT nnzc = colptrC[B.cols];
double compression_ratio = (double)flops / nnzc;
uint64_t required_memory = safety_net * nnzc * (sizeof(FT)+sizeof(IT)); // required memory to form the output
int stages = std::ceil((double) required_memory/ free_memory); // form output in stages
uint64_t nnzcperstage = free_memory / (safety_net * (sizeof(FT)+sizeof(IT)));
std::string nnzOutput = std::to_string(nnzc);
std::string FreeMemory = std::to_string(free_memory) + " MB";
std::string CompressionRatio = std::to_string(compression_ratio);
std::string RequiredMemory = std::to_string(required_memory) + " MB";
std::string RequiredStages = std::to_string(stages);
printLog(nnzOutput);
printLog(CompressionRatio);
printLog(FreeMemory);
printLog(RequiredMemory);
printLog(RequiredStages);
IT * colStart = new IT[stages+1]; // one array is enough to set stage boundaries
colStart[0] = 0;
for(int i = 1; i < stages; ++i) // colsPerStage is no longer fixed (helps with potential load imbalance)
{
// std::upper_bound returns an iterator pointing to the first element
// in the range [first, last) that is greater than value, or last if no such element is found
auto upper = std::upper_bound(colptrC, colptrC+B.cols+1, i*nnzcperstage );
colStart[i] = upper - colptrC - 1; // we don't want the element that exceeds our budget, we want the one just before that
}
colStart[stages] = B.cols;
for(int b = 0; b < stages; ++b)
{
double alnlenl = omp_get_wtime();
vector<IT> * RowIdsofC = new vector<IT>[colStart[b+1]-colStart[b]]; // row ids for each column of C (bunch of cols)
vector<FT> * ValuesofC = new vector<FT>[colStart[b+1]-colStart[b]]; // values for each column of C (bunch of cols)
LocalSpGEMM(colStart[b], colStart[b+1], A, B, multop, addop, RowIdsofC, ValuesofC, colptrC, true);
double alnlen2 = omp_get_wtime();
std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
printLog(ColumnsRange);
std::string OverlapTime = std::to_string(alnlen2-alnlenl) + " seconds";
printLog(OverlapTime);
IT endnz = colptrC[colStart[b+1]];
IT begnz = colptrC[colStart[b]];
IT * rowids = new IT[endnz-begnz];
FT * values = new FT[endnz-begnz];
for(IT i=colStart[b]; i<colStart[b+1]; ++i) // combine step
{
IT loccol = i-colStart[b];
IT locnz = colptrC[i]-begnz;
copy(RowIdsofC[loccol].begin(), RowIdsofC[loccol].end(), rowids + locnz);
copy(ValuesofC[loccol].begin(), ValuesofC[loccol].end(), values + locnz);
}
delete [] RowIdsofC;
delete [] ValuesofC;
// GG: all parallelism moved to the GPU; we can do better
std::tuple<uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, double> alignstats; // (alignedpairs, alignedbases, totalreadlen, outputted, alignedtrue, alignedfalse, timeoutputt)
alignstats = RunPairWiseAlignmentsGPU(colStart[b], colStart[b+1], begnz, colptrC, rowids, values, reads, b_pars, filename, ratiophi);
if(!b_pars.skipAlignment)
{
double elapsed = omp_get_wtime()-alnlen2;
double aligntime = elapsed-get<6>(alignstats); // subtracting outputting time
std::string ColumnsRange = "[" + std::to_string(colStart[b]) + " - " + std::to_string(colStart[b+1]) + "]";
printLog(ColumnsRange);
std::string AlignmentTime = std::to_string(aligntime) + " seconds";
printLog(AlignmentTime);
std::string AlignmentRate = std::to_string((int)(static_cast<double>(get<1>(alignstats))/aligntime)) + " bases/second";
printLog(AlignmentRate);
std::string AverageReadLength = std::to_string((int)(static_cast<double>(get<2>(alignstats))/(2*get<0>(alignstats))));
printLog(AverageReadLength);
std::string PairsAligned = std::to_string(get<0>(alignstats));
printLog(PairsAligned);
std::string AverageLengthSuccessfulAlignment = std::to_string((int)(static_cast<double>(get<4>(alignstats))/get<3>(alignstats))) + " bps";
printLog(AverageLengthSuccessfulAlignment);
std::string AverageLengthFailedAlignment = std::to_string((int)(static_cast<double>(get<5>(alignstats)) / (get<0>(alignstats) - get<3>(alignstats)))) + " bps";
printLog(AverageLengthFailedAlignment);
}
int LinesOutputted = get<3>(alignstats);
printLog(LinesOutputted);
std::string OutputtingTime = std::to_string(get<6>(alignstats)) + " seconds";
printLog(OutputtingTime);
delete [] rowids;
delete [] values;
} // for(int b = 0; b < stages; ++b)
delete [] colptrC;
delete [] colStart;
}
#endif // #ifdef __NVCC__
|
deconvolution_4x4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void deconv4x4s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*16 + q*16;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 4;
const float* k2 = kernel0 + 8;
const float* k3 = kernel0 + 12;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
float* outptr = out.data + out.w * i;
float* outptr0 = outptr;
float* outptr1 = outptr0 + outw;
float* outptr2 = outptr1 + outw;
float* outptr3 = outptr2 + outw;
int j = 0;
#if __ARM_NEON
for (; j+3<w; j+=4)
{
float32x4_t _v = vld1q_f32(r0);
//
float32x4_t _out00 = vld1q_f32(outptr0 + 0);
_out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
vst1q_f32(outptr0 + 0, _out00);
float32x4_t _out01 = vld1q_f32(outptr0 + 1);
_out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
vst1q_f32(outptr0 + 1, _out01);
float32x4_t _out02 = vld1q_f32(outptr0 + 2);
_out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
vst1q_f32(outptr0 + 2, _out02);
float32x4_t _out03 = vld1q_f32(outptr0 + 3);
_out03 = vmlaq_lane_f32(_out03, _v, vget_high_f32(_k0), 1);
vst1q_f32(outptr0 + 3, _out03);
//
float32x4_t _out10 = vld1q_f32(outptr1 + 0);
_out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
vst1q_f32(outptr1 + 0, _out10);
float32x4_t _out11 = vld1q_f32(outptr1 + 1);
_out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
vst1q_f32(outptr1 + 1, _out11);
float32x4_t _out12 = vld1q_f32(outptr1 + 2);
_out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
vst1q_f32(outptr1 + 2, _out12);
float32x4_t _out13 = vld1q_f32(outptr1 + 3);
_out13 = vmlaq_lane_f32(_out13, _v, vget_high_f32(_k1), 1);
vst1q_f32(outptr1 + 3, _out13);
//
float32x4_t _out20 = vld1q_f32(outptr2 + 0);
_out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
vst1q_f32(outptr2 + 0, _out20);
float32x4_t _out21 = vld1q_f32(outptr2 + 1);
_out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
vst1q_f32(outptr2 + 1, _out21);
float32x4_t _out22 = vld1q_f32(outptr2 + 2);
_out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
vst1q_f32(outptr2 + 2, _out22);
float32x4_t _out23 = vld1q_f32(outptr2 + 3);
_out23 = vmlaq_lane_f32(_out23, _v, vget_high_f32(_k2), 1);
vst1q_f32(outptr2 + 3, _out23);
//
float32x4_t _out30 = vld1q_f32(outptr3 + 0);
_out30 = vmlaq_lane_f32(_out30, _v, vget_low_f32(_k3), 0);
vst1q_f32(outptr3 + 0, _out30);
float32x4_t _out31 = vld1q_f32(outptr3 + 1);
_out31 = vmlaq_lane_f32(_out31, _v, vget_low_f32(_k3), 1);
vst1q_f32(outptr3 + 1, _out31);
float32x4_t _out32 = vld1q_f32(outptr3 + 2);
_out32 = vmlaq_lane_f32(_out32, _v, vget_high_f32(_k3), 0);
vst1q_f32(outptr3 + 2, _out32);
float32x4_t _out33 = vld1q_f32(outptr3 + 3);
_out33 = vmlaq_lane_f32(_out33, _v, vget_high_f32(_k3), 1);
vst1q_f32(outptr3 + 3, _out33);
r0 += 4;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif // __ARM_NEON
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr0[3] += val * k0[3];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr1[3] += val * k1[3];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
outptr2[3] += val * k2[3];
outptr3[0] += val * k3[0];
outptr3[1] += val * k3[1];
outptr3[2] += val * k3[2];
outptr3[3] += val * k3[3];
r0++;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
}
}
}
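// The NEON kernel above implements a stride-1 "full" deconvolution: each
// input pixel scatters a 4x4 kernel-weighted patch into the output, so the
// vector path updates four overlapping output positions per row at once. A
// scalar reference for a single input row and channel (compiled out;
// illustrative only, without bias or channel loops, assuming outw = w + 3):
#if 0
static void deconv4x4s1_row_ref(const float* r0, int w, const float* k /*16 weights*/,
                                float* out0, float* out1, float* out2, float* out3)
{
    for (int j = 0; j < w; j++)
    {
        float val = r0[j];
        for (int t = 0; t < 4; t++)
        {
            out0[j + t] += val * k[0 + t];  // kernel row 0
            out1[j + t] += val * k[4 + t];  // kernel row 1
            out2[j + t] += val * k[8 + t];  // kernel row 2
            out3[j + t] += val * k[12 + t]; // kernel row 3
        }
    }
}
#endif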
static void deconv4x4s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*16 + q*16;
const float* r0 = img0;
const float* k0 = kernel0;
const float* k1 = kernel0 + 4;
const float* k2 = kernel0 + 8;
const float* k3 = kernel0 + 12;
#if __ARM_NEON
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
float32x4_t _k3 = vld1q_f32(k3);
#endif // __ARM_NEON
for (int i = 0; i < h; i++)
{
float* outptr = out.data + out.w * i*2;
float* outptr0 = outptr;
float* outptr1 = outptr0 + outw;
float* outptr2 = outptr1 + outw;
float* outptr3 = outptr2 + outw;
int j = 0;
#if __ARM_NEON
for (; j+3<w; j+=4)
{
float32x4_t _v = vld1q_f32(r0);
// row 0
float32x4x2_t _out0 = vld2q_f32(outptr0);
// 0,2,4,6
_out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_low_f32(_k0), 0);
// 1,3,5,7
_out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_low_f32(_k0), 1);
vst2q_f32(outptr0, _out0);
_out0 = vld2q_f32(outptr0 + 2);
// 2,4,6,8
_out0.val[0] = vmlaq_lane_f32(_out0.val[0], _v, vget_high_f32(_k0), 0);
// 3,5,7,9
_out0.val[1] = vmlaq_lane_f32(_out0.val[1], _v, vget_high_f32(_k0), 1);
vst2q_f32(outptr0 + 2, _out0);
// row 1
float32x4x2_t _out1 = vld2q_f32(outptr1);
// 0,2,4,6
_out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_low_f32(_k1), 0);
// 1,3,5,7
_out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_low_f32(_k1), 1);
vst2q_f32(outptr1, _out1);
_out1 = vld2q_f32(outptr1 + 2);
// 2,4,6,8
_out1.val[0] = vmlaq_lane_f32(_out1.val[0], _v, vget_high_f32(_k1), 0);
// 3,5,7,9
_out1.val[1] = vmlaq_lane_f32(_out1.val[1], _v, vget_high_f32(_k1), 1);
vst2q_f32(outptr1 + 2, _out1);
// row 2
float32x4x2_t _out2 = vld2q_f32(outptr2);
_out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_low_f32(_k2), 0);
_out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_low_f32(_k2), 1);
vst2q_f32(outptr2, _out2);
_out2 = vld2q_f32(outptr2 + 2);
_out2.val[0] = vmlaq_lane_f32(_out2.val[0], _v, vget_high_f32(_k2), 0);
_out2.val[1] = vmlaq_lane_f32(_out2.val[1], _v, vget_high_f32(_k2), 1);
vst2q_f32(outptr2 + 2, _out2);
// row 3
float32x4x2_t _out3 = vld2q_f32(outptr3);
_out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_low_f32(_k3), 0);
_out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_low_f32(_k3), 1);
vst2q_f32(outptr3, _out3);
_out3 = vld2q_f32(outptr3 + 2);
_out3.val[0] = vmlaq_lane_f32(_out3.val[0], _v, vget_high_f32(_k3), 0);
_out3.val[1] = vmlaq_lane_f32(_out3.val[1], _v, vget_high_f32(_k3), 1);
vst2q_f32(outptr3 + 2, _out3);
r0 += 4;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#endif // __ARM_NEON
for (; j < w; j++)
{
float val = r0[0];
outptr0[0] += val * k0[0];
outptr0[1] += val * k0[1];
outptr0[2] += val * k0[2];
outptr0[3] += val * k0[3];
outptr1[0] += val * k1[0];
outptr1[1] += val * k1[1];
outptr1[2] += val * k1[2];
outptr1[3] += val * k1[3];
outptr2[0] += val * k2[0];
outptr2[1] += val * k2[1];
outptr2[2] += val * k2[2];
outptr2[3] += val * k2[3];
outptr3[0] += val * k3[0];
outptr3[1] += val * k3[1];
outptr3[2] += val * k3[2];
outptr3[3] += val * k3[3];
r0++;
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
}
}
}
}
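// The stride-2 kernel above leans on vld2q_f32/vst2q_f32 de-interleaving an
// output row into even and odd lanes, so one input vector updates output
// columns {0,2,4,6} and {1,3,5,7} with two fused multiply-adds. A minimal
// demonstration of that de-interleave (compiled out; ARM NEON only):
#if 0
#if __ARM_NEON
static void vld2q_demo()
{
    float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    float32x4x2_t v = vld2q_f32(buf);
    // v.val[0] now holds the even-indexed elements {0, 2, 4, 6}
    // v.val[1] now holds the odd-indexed elements  {1, 3, 5, 7}
    vst2q_f32(buf, v); // interleaves them back: buf is unchanged
}
#endif // __ARM_NEON
#endif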
|
nukedclan_fmt_plug.c | /* Nuked-Klan CMS DB cracker patch for JtR. Hacked together during
* July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$nk$*HASHKEY*hash
*
* Where,
*
* HASHKEY => hex(HASHKEY value found in conf.inc.php)
*
* Modified by JimF, Jul 2012. About a 6x speed improvement.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_nk;
#elif FMT_REGISTERS_H
john_register_one(&fmt_nk);
#else
#include <string.h>
#include "arch.h"
#include "md5.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "common.h"
#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 quad HT
// 1 5059K
// 16 8507k
// 64 8907k ** this was chosen.
// 128 8914k
// 256 8810k
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "nk"
#define FORMAT_NAME "Nuked-Klan CMS"
#define FORMAT_TAG "$nk$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1 /* change to 0 once there's any speedup for "many salts" */
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH (4+32+40+3+1)
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
static struct fmt_tests nk_tests[] = {
{"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#17737d3661312121d5ae7d5c6156c0298", "openwall"},
{"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#5c20384512ee36590f5f0ab38a46c6ced", "password"},
// from pass_gen.pl
{"$nk$*503476424c5362476f36463630796a6e6c656165*#2f27c20e65b88b76c913115cdec3d9a18", "test1"},
{"$nk$*7a317a71794339586c434d50506b6e4356626a67*#b62a615f605c2fd520edde76577d30f90", "thatsworking"},
{"$nk$*796b7375666d7545695032413769443977644132*#4aec90bd9a930faaa42a0d7d40056132e", "test3"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
unsigned char HASHKEY[41];
int decal;
} *cur_salt;
inline static void hex_encode(unsigned char *str, int len, unsigned char *out)
{
int i;
for (i = 0; i < len; ++i) {
out[0] = itoa16[str[i]>>4];
out[1] = itoa16[str[i]&0xF];
out += 2;
}
}
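/*
 * Illustrative use of hex_encode() (compiled out): it writes two lowercase
 * hex digits per input byte and does not NUL-terminate, so the caller
 * terminates the buffer.
 */
#if 0
static void hex_encode_example(void)
{
	unsigned char in[2] = { 0xAB, 0xCD };
	unsigned char out[5];
	hex_encode(in, 2, out);
	out[4] = 0; /* out now holds "abcd" */
}
#endif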
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
memcpy(out, ciphertext, CIPHERTEXT_LENGTH);
out[CIPHERTEXT_LENGTH] = 0;
strlwr(out);
return out;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ptr, *ctcopy, *keeptr;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
if (!(ctcopy = strdup(ciphertext)))
return 0;
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip leading "$nk$*" */
if (!(ptr = strtokm(ctcopy, "*")))
goto error;
/* HASHKEY is of fixed length 40 */
if (hexlenl(ptr, &extra) != 40 || extra)
goto error;
if (!(ptr = strtokm(NULL, "*")))
goto error;
/* skip two characters, for "nk_tests[]" this is '#'
* followed by decal value */
if (strlen(ptr) <= 2)
goto error;
ptr += 2;
/* hash is of fixed length 32 */
if (hexlenl(ptr, &extra) != 32 || extra)
goto error;
MEM_FREE(keeptr);
return 1;
error:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char _ctcopy[256], *ctcopy=_ctcopy;
char *p;
int i;
memset(&cs, 0, sizeof(cs));
strnzcpy(ctcopy, ciphertext, 255);
ctcopy += FORMAT_TAG_LEN; /* skip over "$nk$*" */
p = strtokm(ctcopy, "*");
for (i = 0; i < 20; i++)
cs.HASHKEY[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.decal = atoi16[ARCH_INDEX(p[1])];
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1 + 2;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char pass[40+1];
unsigned char out[80];
int i, k;
int idx = 0;
MD5_CTX c;
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA1_Final(out, &ctx);
hex_encode(out, 20, pass);
for (i = 0, k=cur_salt->decal; i < 40; ++i, ++k) {
out[idx++] = pass[i];
if (k>19) k = 0;
out[idx++] = cur_salt->HASHKEY[k];
}
MD5_Init(&c);
MD5_Update(&c, out, 80);
MD5_Final((unsigned char*)crypt_out[index], &c);
}
return count;
}
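/*
 * For reference (compiled out): per candidate password the kernel above
 * computes md5(interleave(hex(sha1(password)), HASHKEY)), where the 40
 * lowercase hex digits of the SHA-1 are interleaved byte-by-byte with the
 * 20-byte HASHKEY starting at offset "decal" and wrapping past index 19.
 * Sketch of the interleave step only, mirroring crypt_all():
 */
#if 0
static void nk_interleave(const unsigned char pass_hex[40],
                          const unsigned char hashkey[20], int decal,
                          unsigned char out[80])
{
	int i, k, idx = 0;
	for (i = 0, k = decal; i < 40; ++i, ++k) {
		out[idx++] = pass_hex[i];
		if (k > 19)
			k = 0;
		out[idx++] = hashkey[k];
	}
}
#endif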
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (*((uint32_t*)binary) == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return *((uint32_t*)binary) == crypt_out[index][0];
}
static int cmp_exact(char *source, int index)
{
void *binary = get_binary(source);
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static void nk_set_key(char *key, int index)
{
strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_nk = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
nk_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
nk_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unaryop__ainv_int16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_int64
// op(A') function: GB_tran__ainv_int16_int64
// C type: int16_t
// A type: int64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_INT64)
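// (The GxB_NO_* macros come from GB_control.h: defining them at compile time
// drops this specialized kernel from the build, in which case GraphBLAS
// falls back to its generic, slower implementation for this operator/type
// pair.)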
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_int64
(
int16_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__minus_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int16)
// A*D function (colscale): GB (_AxD__minus_int16)
// D*A function (rowscale): GB (_DxB__minus_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int16)
// C=scalar+B GB (_bind1st__minus_int16)
// C=scalar+B' GB (_bind1st_tran__minus_int16)
// C=A+scalar GB (_bind2nd__minus_int16)
// C=A'+scalar GB (_bind2nd_tran__minus_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__minus_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__minus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__minus_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__minus_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__minus_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__minus_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB (_bind1st_tran__minus_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB (_bind2nd_tran__minus_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pair_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__pair_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__pair_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__pair_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pair_int16)
// A*D function (colscale): GB (_AxD__pair_int16)
// D*A function (rowscale): GB (_DxB__pair_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__pair_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = 1
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
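// (The PAIR operator never reads the values of A or B -- cij is always 1 --
// which is why GB_GETA and GB_GETB above expand to empty statements.)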
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_INT16 || GxB_NO_PAIR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
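// (This variant is compiled out: the dense ewise3 accumulation kernel is
// only generated for the MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, and
// RDIV operators listed above, and PAIR is not among them, so the generator
// emits the placeholder name GB ((none)) and guards the body with #if 0.)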
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pair_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pair_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pair_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__pair_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__pair_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pair_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__pair_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__pair_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__pair_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__pair_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
bf16_vec_kernel.h | #include "vec_type_cvt.h"
#if defined(CPU_AVX512)
#include <immintrin.h>
#else
#include "csrc/cpu/vec512/ref/add_ker.h"
#include "csrc/cpu/vec512/ref/mov_ker.h"
using namespace torch_ipex::cpu::kernel;
#endif
#if defined(CPU_AVX512)
inline __m512 pack_bf16_to_fp32(const __m256i top, const __m256i bot) {
auto x1 = _mm512_cvtepu16_epi32(top);
auto x2 = _mm512_cvtepu16_epi32(bot);
auto y = _mm512_add_epi32(_mm512_bslli_epi128(x1, 2), x2);
return _mm512_castsi512_ps(y);
}
#endif
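// Layout note for the "packed" kernel below: a1 holds the upper 16 bits and
// a2 the lower 16 bits of each fp32 value, so a1 on its own is a valid
// (truncated) bf16 tensor while the (a1, a2) pair reconstructs full fp32
// precision. In pack_bf16_to_fp32 the 2-byte lane shift acts as a
// per-element 16-bit left shift, since the widened elements have zero upper
// bytes.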
// Only the AVX512 implementation is supported at this stage; this will be
// expanded to cover AVX2 and other cases.
inline void packed_bf16_add_ker(
at::BFloat16* a1,
at::BFloat16* a2,
at::BFloat16* b,
int len,
float alpha) {
#if defined(CPU_AVX512)
auto vAlpha = _mm512_set1_ps(alpha);
int i = 0;
for (; i < len - 15; i += 16) {
auto x1 = _mm256_loadu_si256((__m256i*)(a1 + i));
auto x2 = _mm256_loadu_si256((__m256i*)(a2 + i));
auto y1 = _mm256_loadu_si256((__m256i*)(b + i));
auto z1 = pack_bf16_to_fp32(x1, x2);
auto z2 = cvt_bf16_to_fp32(y1);
z1 = _mm512_fmadd_ps(vAlpha, z2, z1);
// Write the result back to the split input tensors (a1 = high 16 bits,
// a2 = low 16 bits).
_mm256_storeu_si256((__m256i*)(a1 + i), trunc_fp32_to_bf16(z1));
_mm256_storeu_si256(
(__m256i*)(a2 + i), _mm512_cvtepi32_epi16(_mm512_castps_si512(z1)));
}
if (i < len) {
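// Tail handling: (1 << (len - i)) - 1 sets one mask bit per remaining
// element, so the masked loads/stores below touch only the last (len - i)
// elements and never read or write past the end of the buffers.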
__mmask16 mask = (1 << (len - i)) - 1;
auto x1 = _mm256_maskz_loadu_epi16(mask, a1 + i);
auto x2 = _mm256_maskz_loadu_epi16(mask, a2 + i);
auto y1 = _mm256_maskz_loadu_epi16(mask, b + i);
auto z1 = pack_bf16_to_fp32(x1, x2);
auto z2 = cvt_bf16_to_fp32(y1);
z1 = _mm512_fmadd_ps(vAlpha, z2, z1);
// Write the result back to the split input tensors (a1 = high 16 bits,
// a2 = low 16 bits).
_mm256_mask_storeu_epi16(a1 + i, mask, trunc_fp32_to_bf16(z1));
_mm256_mask_storeu_epi16(
a2 + i, mask, _mm512_cvtepi32_epi16(_mm512_castps_si512(z1)));
}
#else
for (int i = 0; i < len; i++) {
uint32_t hi = (a1 + i)->x;
uint32_t lo = (a2 + i)->x;
uint32_t merge = hi << 16 | lo;
float a_val = *((float*)&merge);
float b_val = *(b + i);
float res = a_val + b_val * alpha;
(a1 + i)->x = (uint16_t)((*((uint32_t*)(&res))) >> 16);
(a2 + i)->x = *((uint16_t*)(&res));
}
#endif
}
inline void add_ker(at::BFloat16* inout, at::BFloat16* in, int len) {
int i = 0;
#if defined(CPU_AVX512)
#pragma unroll(2)
for (i = 0; i < len - 31; i += 32) {
auto inout1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(inout + i)));
auto inout2 =
cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(inout + i + 16)));
auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
auto in2 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i + 16)));
inout1 = _mm512_add_ps(inout1, in1);
inout2 = _mm512_add_ps(inout2, in2);
_mm256_storeu_si256((__m256i*)(inout + i), cvt_fp32_to_bf16(inout1));
_mm256_storeu_si256((__m256i*)(inout + i + 16), cvt_fp32_to_bf16(inout2));
}
if (i < len - 15) {
auto inout1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(inout + i)));
auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
inout1 = _mm512_add_ps(inout1, in1);
_mm256_storeu_si256((__m256i*)(inout + i), cvt_fp32_to_bf16(inout1));
i += 16;
}
if (i < len) {
auto mask = (1 << (len - i)) - 1;
auto inout1 = cvt_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, inout + i));
auto in1 = cvt_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, in + i));
inout1 = _mm512_add_ps(inout1, in1);
_mm256_mask_storeu_epi16(inout + i, mask, cvt_fp32_to_bf16(inout1));
}
#else
ref::add_ker(inout, in, len);
#endif
}
static inline void add_ker(float* inout, float* in, int len) {
int i = 0;
#if defined(CPU_AVX512)
#pragma unroll(2)
for (i = 0; i < len - 31; i += 32) {
auto out1 = _mm512_loadu_ps(inout + i);
auto out2 = _mm512_loadu_ps(inout + i + 16);
auto in1 = _mm512_loadu_ps(in + i);
auto in2 = _mm512_loadu_ps(in + i + 16);
out1 = _mm512_add_ps(out1, in1);
out2 = _mm512_add_ps(out2, in2);
_mm512_storeu_ps(inout + i, out1);
_mm512_storeu_ps(inout + i + 16, out2);
}
if (i < len - 15) {
auto out1 = _mm512_loadu_ps(inout + i);
auto in1 = _mm512_loadu_ps(in + i);
_mm512_storeu_ps(inout + i, _mm512_add_ps(out1, in1));
i += 16;
}
if (i < len) {
auto mask = (1 << (len - i)) - 1;
auto out1 = _mm512_maskz_loadu_ps(mask, inout + i);
auto in1 = _mm512_maskz_loadu_ps(mask, in + i);
_mm512_mask_storeu_ps(inout + i, mask, _mm512_add_ps(out1, in1));
}
#else
ref::add_ker(inout, in, len);
#endif
}
static inline void add_ker(float* inout, at::BFloat16* in, int len) {
int i = 0;
#if defined(CPU_AVX512)
#pragma unroll(2)
for (i = 0; i < len - 31; i += 32) {
auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
auto in2 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i + 16)));
auto inout1 = _mm512_loadu_ps(inout + i);
auto inout2 = _mm512_loadu_ps(inout + i + 16);
inout1 = _mm512_add_ps(inout1, in1);
inout2 = _mm512_add_ps(inout2, in2);
_mm512_storeu_ps(inout + i, inout1);
_mm512_storeu_ps(inout + i + 16, inout2);
}
if (i < len - 15) {
auto in1 = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
auto inout1 = _mm512_loadu_ps(inout + i);
inout1 = _mm512_add_ps(inout1, in1);
_mm512_storeu_ps(inout + i, inout1);
i += 16;
}
if (i < len) {
auto mask = (1 << (len - i)) - 1;
auto in1 = cvt_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, in + i));
auto inout1 = _mm512_maskz_loadu_ps(mask, inout + i);
inout1 = _mm512_add_ps(inout1, in1);
_mm512_mask_storeu_ps(inout + i, mask, inout1);
}
#else
ref::add_ker(inout, in, len);
#endif
}
inline void add_ker(double* inout, double* in, int len) {
#pragma omp simd
for (int i = 0; i < len; i++) {
*(inout + i) += *(in + i);
}
}
static inline void move_ker(at::BFloat16* out, float* in, int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
#pragma unroll(4)
for (i = 0; i < len - 31; i += 32) {
auto in0 = cvt_fp32_to_bf16(_mm512_loadu_ps(in + i));
auto in1 = cvt_fp32_to_bf16(_mm512_loadu_ps(in + i + 16));
_mm256_storeu_si256((__m256i*)(out + i), in0);
_mm256_storeu_si256((__m256i*)(out + i + 16), in1);
}
if (i < len - 15) {
auto in0 = cvt_fp32_to_bf16(_mm512_loadu_ps(in + i));
_mm256_storeu_si256((__m256i*)(out + i), in0);
i += 16;
}
if (i < len) {
auto mask = ((1 << (len - i)) - 1);
auto in0 = cvt_fp32_to_bf16(_mm512_maskz_loadu_ps(mask, in + i));
_mm256_mask_storeu_epi16((__m256i*)(out + i), mask, in0);
}
#else
ref::mov_ker(out, in, len);
#endif
}
static inline void move_ker(float* out, const float* in, int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
#pragma unroll(4)
for (i = 0; i < len - 15; i += 16) {
auto in0 = _mm512_loadu_ps(in + i);
_mm512_storeu_ps(out + i, in0);
}
if (i < len) {
auto mask = ((1 << (len - i)) - 1);
auto in0 = _mm512_maskz_loadu_ps(mask, in + i);
_mm512_mask_storeu_ps(out + i, mask, in0);
}
#else
ref::mov_ker(out, in, len);
#endif
}
static inline void move_ker(
at::BFloat16* out,
const at::BFloat16* in,
int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
#pragma unroll(4)
for (i = 0; i < len - 31; i += 32) {
auto in0 = _mm512_loadu_si512(in + i);
_mm512_storeu_si512(out + i, in0);
}
if (i < len) {
auto mask = (1 << (len - i)) - 1;
auto in0 = _mm512_maskz_loadu_epi16(mask, in + i);
_mm512_mask_storeu_epi16(out + i, mask, in0);
}
#else
ref::mov_ker(out, in, len);
#endif
}
static inline void move_ker(int64_t* out, int64_t* in, int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
#pragma unroll(4)
for (i = 0; i < len - 7; i += 8) {
auto in0 = _mm512_loadu_pd(in + i);
_mm512_storeu_pd(out + i, in0);
}
if (i < len) {
auto mask = ((1 << (len - i)) - 1);
auto in0 = _mm512_maskz_loadu_pd(mask, in + i);
_mm512_mask_storeu_pd(out + i, mask, in0);
}
#else
ref::mov_ker(out, in, len);
#endif
}
static inline void move_ker(int32_t* out, const int32_t* in, int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
#pragma unroll(4)
for (i = 0; i < len - 15; i += 16) {
auto in0 = _mm512_loadu_ps(in + i);
_mm512_storeu_ps(out + i, in0);
}
if (i < len) {
auto mask = ((1 << (len - i)) - 1);
auto in0 = _mm512_maskz_loadu_ps(mask, in + i);
_mm512_mask_storeu_ps(out + i, mask, in0);
}
#else
ref::mov_ker(out, in, len);
#endif
}
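// (The ps/pd loads in the integer move_ker variants above are used purely as
// 512-bit data movement; for a bitwise copy the element type is irrelevant,
// so int32/int64 buffers are moved with float/double vector loads of the
// same total width.)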
static inline void move_ker(double* out, double* in, int len) {
#pragma omp simd
for (int i = 0; i < len; i++) {
*(out + i) = *(in + i);
}
}
static inline void zero_ker(double* out, int len) {
#pragma omp simd
for (int i = 0; i < len; i++) {
*(out + i) = 0;
}
}
static inline void zero_ker(float* out, int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
__m512 zero_512 = _mm512_setzero_ps();
#pragma unroll(4)
for (i = 0; i < len - 15; i += 16) {
_mm512_storeu_ps(out + i, zero_512);
}
if (i < len) {
auto mask = ((1 << (len - i)) - 1);
_mm512_mask_storeu_ps(out + i, mask, zero_512);
}
#else
memset(out, 0, len * sizeof(float));
#endif
}
static inline void zero_ker(at::BFloat16* out, int64_t len) {
int64_t i = 0;
#if defined(CPU_AVX512)
__m512i zero_512 = _mm512_setzero_si512();
#pragma unroll(4)
for (i = 0; i < len - 31; i += 32) {
_mm512_storeu_si512(out + i, zero_512);
}
if (i < len) {
auto mask = ((1 << (len - i)) - 1);
_mm512_mask_storeu_epi16(out + i, mask, zero_512);
}
#else
memset(out, 0, len * sizeof(at::BFloat16));
#endif
}
#if defined(CPU_AVX512)
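// bf16 is the upper half of an fp32 value (same sign and exponent, truncated
// mantissa), so widening each 16-bit element and shifting it left by 16 bits
// (the 2-byte lane shift below) reproduces the fp32 value exactly.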
inline __m512 convert_bf16_to_fp32(const __m256i src) {
__m512i y = _mm512_cvtepu16_epi32(src);
return _mm512_castsi512_ps(_mm512_bslli_epi128(y, 2));
}
#endif
template <typename T>
inline float toFloat(T val) {
float ret = float(val);
return ret;
}
template <typename T1, typename T2>
inline void madd_ker(T1* inout, T2* in, int len, float alpha) {
#pragma omp simd
for (long v = 0; v < len; v++) {
inout[v] += toFloat(in[v]) * alpha;
}
}
#if defined(CPU_AVX512)
template <>
inline void madd_ker(float* inout, at::BFloat16* in, int len, float alpha) {
__m512 vAlpha = _mm512_set1_ps(alpha);
int i = 0;
for (; i < len - 15; i += 16) {
__m512 y1 = _mm512_loadu_ps(inout + i);
__m512 y2 = convert_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(in + i)));
y1 = _mm512_fmadd_ps(vAlpha, y2, y1);
_mm512_storeu_ps(inout + i, y1);
}
if (i < len) {
int rem = len - i;
__mmask16 mask = (1 << rem) - 1;
__m512 y1 = _mm512_maskz_loadu_ps(mask, inout + i);
__m512 y2 = convert_bf16_to_fp32(_mm256_maskz_loadu_epi16(mask, in + i));
y1 = _mm512_fmadd_ps(vAlpha, y2, y1);
_mm512_mask_storeu_ps(inout + i, mask, y1);
}
}
#endif
|
DRB062-matrixvector2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: inner level parallelization.
*/
#include <stdio.h>
#define N 1000
double a[N][N],v[N],v_out[N];
int init()
{
int i,j,k;
#pragma omp parallel for private(i,j)
for (i = 0; i < N; i++) {
#pragma omp parallel for private(j)
for (j = 0; j < N; j++) {
a[i][j] = i * j;
}
v_out[i] = i * j;
v[i] = i * j;
}
return 0;
}
void mv()
{
int i,j;
#pragma omp parallel for private(i,j)
for (i = 0; i < N; i++)
{
float sum = 0.0;
#pragma omp parallel for private(j) reduction(+:sum)
for (j = 0; j < N; j++)
{
sum += a[i][j]*v[j];
}
v_out[i] = sum;
}
}
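/* Note: the inner "parallel for reduction(+:sum)" opens a nested parallel
   region inside every iteration of the outer loop. Unless nested parallelism
   is enabled (e.g. via omp_set_max_active_levels), the inner region runs
   with a team of one thread; either way, sum is declared per outer iteration
   and reduced over the inner loop, so there is no data race here. */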
int print()
{
int i,j,k;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%lf\n", a[i][j]);
}
printf("%lf\n",v_out[i]);
printf("%lf\n",v[i]);
}
return 0;
}
int main()
{
init();
mv();
print();
return 0;
}
|
test.c | // RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu
#define IN_PARALLEL 0
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define N 1000
#pragma omp requires unified_shared_memory
/*
 * Test whether it is possible to:
 * 1. make target enter data depend on 'in' and 'out' clauses,
 * 2. make target exit data depend on 'in' and 'out' clauses, and
 * 3. mix target-based tasks with host tasks.
*/
int main(){
int errors = 0;
bool isHost = true;
double sum = 0.0;
double* h_array = (double *) malloc(N * sizeof(double));
double* in_1 = (double *) malloc(N * sizeof(double));
double* in_2 = (double *) malloc(N * sizeof(double));
#if IN_PARALLEL
#pragma omp parallel
{
#pragma omp master
{
#endif
// host task
#pragma omp task depend(out: in_1) shared(in_1)
{
for (int i = 0; i < N; ++i) {
in_1[i] = 1;
}
}
// host task
#pragma omp task depend(out: in_2) shared(in_2)
{
for (int i = 0; i < N; ++i) {
in_2[i] = 2;
}
}
// target enter data
#pragma omp target enter data nowait map(alloc: h_array[0:N]) map(to: in_1[0:N]) map(to: in_2[0:N]) depend(out: h_array) depend(in: in_1) depend(in: in_2)
// target task to compute on the device
#pragma omp target nowait map(tofrom: isHost) depend(inout: h_array)
{
isHost = omp_is_initial_device();
for (int i = 0; i < N; ++i) {
h_array[i] = in_1[i]*in_2[i];
}
}
// target exit data
#pragma omp target exit data nowait map(from: h_array[0:N]) depend(inout: h_array)
// host task
#pragma omp task depend(in: h_array) shared(sum, h_array)
{
// checking results
for (int i = 0; i < N; ++i) {
sum += h_array[i];
}
}
#if IN_PARALLEL
} // master
} // parallel
#else
#pragma omp taskwait
#endif
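// At this point every task above has completed: the depend clauses order
// them with respect to one another, and the taskwait (or, in the IN_PARALLEL
// build, the implicit barrier at the end of the parallel region) guarantees
// completion before sum is checked.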
errors = 2.0*N != sum;
if (!errors)
printf("Test passed\n");
else
printf("Test failed on %s: sum = %g\n", (isHost ? "host" : "device"), sum);
return errors;
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
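// For illustration only (the callback class name is hypothetical, but the
// calls are the standard MatchFinder interface), a minimal driver for the
// example above might look like:
//
//   class ChildPrinter : public MatchFinder::MatchCallback {
//     void run(const MatchFinder::MatchResult &Result) override {
//       if (const auto *Child = Result.Nodes.getNodeAs<RecordDecl>("child"))
//         Child->dump();
//     }
//   };
//   ...
//   MatchFinder Finder;
//   ChildPrinter Printer;
//   Finder.addMatcher(
//       cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))),
//       &Printer);
//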
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
return SourceManager.isInMainFile(
SourceManager.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
if (ExpansionLoc.isInvalid()) {
return false;
}
return SourceManager.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
std::string, RegExp) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
if (ExpansionLoc.isInvalid()) {
return false;
}
auto FileEntry =
SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc));
if (!FileEntry) {
return false;
}
auto Filename = FileEntry->getName();
llvm::Regex RE(RegExp);
return RE.match(Filename);
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
// Verifies that the statement's beginning and ending are both expanded from
// the same instance of the given macro.
auto& Context = Finder->getASTContext();
llvm::Optional<SourceLocation> B =
internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
if (!B) return false;
llvm::Optional<SourceLocation> E =
internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
if (!E) return false;
return *B == *E;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Example matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches a template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
CXXBaseSpecifier)) {
return getAccessSpecifier(Node) == AS_public;
}
/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
CXXBaseSpecifier)) {
return getAccessSpecifier(Node) == AS_protected;
}
/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
CXXBaseSpecifier)) {
return getAccessSpecifier(Node) == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
return Node.isBitField() &&
Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *Initializer = Node.getInClassInitializer();
return (Initializer != nullptr &&
InnerMatcher.matches(*Initializer, Finder, Builder));
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
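///
/// For example, given
/// \code
/// int main() {}
/// void f();
/// \endcode
/// functionDecl(isMain())
/// matches the definition of 'main', but not 'f'.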
AST_MATCHER(FunctionDecl, isMain) {
return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; // #1
/// template<> class A<int> {}; // #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
const ClassTemplateDecl* Decl = Node.getSpecializedTemplate();
return (Decl != nullptr &&
InnerMatcher.matches(*Decl, Finder, Builder));
}
/// Matches a declaration that has been implicitly added
/// by the compiler (e.g. implicit default/copy constructors).
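///
/// For example, given
/// \code
/// class A {};
/// A a;
/// \endcode
/// cxxConstructorDecl(isImplicit())
/// should match the implicitly generated default constructor of \c A that the
/// compiler adds in order to construct \c a.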
AST_MATCHER(Decl, isImplicit) {
return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
hasAnyTemplateArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType,
FunctionDecl),
internal::Matcher<TemplateArgument>, InnerMatcher) {
ArrayRef<TemplateArgument> List =
internal::getTemplateSpecializationArgs(Node);
return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
const internal::Matcher<T> &InnerMatcher) {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>();
}
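/// Overload of \c traverse for bindable matchers; the returned matcher stays
/// bindable.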
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
return internal::BindableMatcher<T>(
internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>());
}
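/// Overload of \c traverse for variadic operator matchers such as \c anyOf.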
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
TK, InnerMatcher);
}
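/// Overload of \c traverse for argument-adapting matchers such as \c has.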
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename T, typename ToTypes>
internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
return internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
ToTypes>>(TK, InnerMatcher);
}
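/// Overload of \c traverse for polymorphic matchers with one parameter.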
template <template <typename T, typename P1> class MatcherT, typename P1,
typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
MatcherT, P1, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
TK, InnerMatcher);
}
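/// Overload of \c traverse for polymorphic matchers with two parameters.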
template <template <typename T, typename P1, typename P2> class MatcherT,
typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// would only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// would only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C-style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// \endcode
/// would match the declarations for a, b, c, and d,
/// while
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// \endcode
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e,
/// while
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
InnerMatcher, 0) {
return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
InnerMatcher, 1) {
const Expr *E = Node.IgnoreParens();
return InnerMatcher.matches(*E, Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if they are
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
hasTemplateArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType,
FunctionDecl),
unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
ArrayRef<TemplateArgument> List =
internal::getTemplateSpecializationArgs(Node);
if (List.size() <= N)
return false;
return InnerMatcher.matches(List[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
templateArgumentCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType),
unsigned, N) {
return internal::getTemplateSpecializationArgs(Node).size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Type)
return false;
return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
internal::Matcher<TemplateName>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Template)
return false;
return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
internal::Matcher<Decl>, InnerMatcher) {
if (Node.getKind() == TemplateArgument::Declaration)
return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
return false;
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
if (Node.getKind() == TemplateArgument::Expression)
return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
return false;
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Integral)
return false;
return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
std::string, Value) {
if (Node.getKind() != TemplateArgument::Integral)
return false;
return Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the '@autoreleasepool' block
/// containing the declaration of "x".
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guides.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int); };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches Objective-C message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has it).
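///
/// For illustration (assuming the semantic-form init list is the node being
/// visited), given
/// \code
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr(hasSyntacticForm(expr()))
/// should match the init list "{ 5, 6 }", whose syntactic form is stored
/// separately from its semantic form.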
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *SyntForm = Node.getSyntacticForm();
return (SyntForm != nullptr &&
InnerMatcher.matches(*SyntForm, Finder, Builder));
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can appear in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but does NOT match (a, b), because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches a reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use the
/// binaryOperator matcher.
/// Currently it does not match operators such as new and delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
InnerMatcher) {
const Stmt *const Increment = Node.getInc();
return (Increment != nullptr &&
InnerMatcher.matches(*Increment, Finder, Builder));
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
InnerMatcher) {
const Stmt *const Init = Node.getInit();
return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
InnerMatcher) {
const VarDecl *const Var = Node.getLoopVariable();
return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// forStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *const Init = Node.getRangeInit();
return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Does not match hex-encoded chars (e.g. 0x1234, which is an IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals, e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed-point literals.
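///
/// For example, given Embedded C code compiled with -ffixed-point
/// \code
/// _Accum a = 0.5k;
/// \endcode
/// fixedPointLiteral()
/// should match \c 0.5k.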
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user-defined literal operator calls.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals.
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
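///
/// For example, given
/// \code
/// int *p = nullptr;
/// \endcode
/// cxxNullPtrLiteralExpr()
/// matches \c nullptr.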
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
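///
/// For example, given
/// \code
/// int a = __builtin_choose_expr(1, 2, 3);
/// \endcode
/// chooseExpr()
/// should match \c __builtin_choose_expr(1, 2, 3).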
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
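///
/// For example, given
/// \code
/// int *p = __null;
/// \endcode
/// gnuNullExpr()
/// matches \c __null.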
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expression and can appear,
/// for example, in BinaryConditionalOperators.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
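///
/// For example, given
/// \code
/// int i = 0;
/// long l = i;
/// \endcode
/// implicitCastExpr()
/// should match the implicit casts applied to \c i in the initialization of
/// \c l (the lvalue-to-rvalue and integral conversions).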
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
return Node.size() == N;
}
/// Matches \c QualTypes in the clang AST.
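///
/// For example, given
/// \code
/// const int i = 0;
/// \endcode
/// varDecl(hasType(qualType(isConstQualified())))
/// should match the declaration of \c i.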
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
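/// For example, given
/// \code
/// void f();
/// void g();
/// \endcode
/// functionDecl(anyOf(hasName("f"), hasName("g")))
/// matches both declarations.
///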
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
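/// For example, given
/// \code
/// int i = 0;
/// \endcode
/// varDecl(allOf(hasName("i"), hasType(asString("int"))))
/// should match the declaration of \c i.
///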
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
internal::Matcher<QualType>, InnerMatcher) {
const QualType ArgumentType = Node.getTypeOfArgument();
return InnerMatcher.matches(ArgumentType, Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait
/// parameter should be passed as a quoted string, e.g. ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return stmt(unaryExprOrTypeTraitExpr(
allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return stmt(unaryExprOrTypeTraitExpr(
allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
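/// For instance, a minimal sketch restricting the argument type (the matcher
/// variable name is illustrative):
/// \code
///   StatementMatcher SizeOfInt =
///       sizeOfExpr(hasArgumentOfType(asString("int")));
/// \endcode
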
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
return internal::Matcher<NamedDecl>(
new internal::HasNameMatcher({std::string(Name)}));
}
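/// A hedged end-to-end sketch of driving a name-based matcher with
/// \c MatchFinder (the callback type \c DumpX and the binding name "x" are
/// illustrative):
/// \code
///   using namespace clang;
///   using namespace clang::ast_matchers;
///
///   class DumpX : public MatchFinder::MatchCallback {
///   public:
///     void run(const MatchFinder::MatchResult &Result) override {
///       if (const auto *RD = Result.Nodes.getNodeAs<CXXRecordDecl>("x"))
///         RD->dump();  // The matched declaration of X.
///     }
///   };
///
///   MatchFinder Finder;
///   DumpX Callback;
///   Finder.addMatcher(cxxRecordDecl(hasName("X")).bind("x"), &Callback);
///   // Run over a TU, e.g. via tooling::newFrontendActionFactory(&Finder).
/// \endcode
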
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
assert(!RegExp.empty());
std::string FullNameString = "::" + Node.getQualifiedNameAsString();
llvm::Regex RE(RegExp);
return RE.match(FullNameString);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
return internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
{std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
isDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
internal::Matcher<NamedDecl>, Base) {
// Check if the node is a C++ struct/union/class.
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
// The node must be an Objective-C class.
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
/*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
std::string, BaseName, 1) {
if (BaseName.empty())
return false;
const auto M = isDerivedFrom(hasName(BaseName));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
///   class IndirectlyDerived : Proxy {}; // matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
BaseSpecMatcher) {
return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isSameOrDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
internal::Matcher<NamedDecl>, Base, 0) {
const auto M = anyOf(Base, isDerivedFrom(Base));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isSameOrDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
std::string, BaseName, 1) {
if (BaseName.empty())
return false;
const auto M = isSameOrDerivedFrom(hasName(BaseName));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isDirectlyDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
internal::Matcher<NamedDecl>, Base, 0) {
// Check if the node is a C++ struct/union/class.
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
// The node must be an Objective-C class.
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
/*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isDirectlyDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
std::string, BaseName, 1) {
if (BaseName.empty())
return false;
const auto M = isDirectlyDerivedFrom(hasName(BaseName));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///     (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that \c has is a direct matcher, so it also matches things like
/// implicit casts and paren casts. If you are matching with \c expr, you
/// should probably consider using \c ignoringParenImpCasts, as in:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
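/// For example, a minimal sketch of the difference (the matcher variable
/// names are illustrative):
/// \code
///   // Fails when the returned DeclRefExpr is wrapped in an implicit cast
///   // (e.g. an lvalue-to-rvalue conversion), since has() only inspects
///   // direct children:
///   StatementMatcher Direct = returnStmt(has(declRefExpr()));
///   // Strips parentheses and implicit casts before matching:
///   StatementMatcher Robust =
///       returnStmt(has(ignoringParenImpCasts(declRefExpr())));
/// \endcode
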
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
///     (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
return eachOf(Matcher, forEachDescendant(Matcher));
}
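/// A minimal sketch wiring the example above into a matcher variable (the
/// name \c AllDefs is illustrative):
/// \code
///   DeclarationMatcher AllDefs =
///       cxxRecordDecl(hasName("::A"),
///                     findAll(cxxRecordDecl(isDefinition()).bind("m")));
/// \endcode
/// Each of \c A, \c B and \c C is then reported as a separate match result
/// under the "m" binding.
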
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
internal::HasDeclarationMatcher, internal::Matcher<Decl>,
void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
return internal::PolymorphicMatcherWithParam1<
internal::HasDeclarationMatcher, internal::Matcher<Decl>,
void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
InnerMatcher) {
const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl();
return UnderlyingDecl != nullptr &&
InnerMatcher.matches(*UnderlyingDecl, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *ExprNode = Node.getImplicitObjectArgument()
->IgnoreParenImpCasts();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objcMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
InnerMatcher) {
const QualType TypeDecl = Node.getReceiverType();
return InnerMatcher.matches(TypeDecl, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *ReceiverNode = Node.getInstanceReceiver();
return (ReceiverNode != nullptr &&
InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
Builder));
}
/// Matches when BaseName == Selector.getAsString()
///
///  matcher = objcMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
Selector Sel = Node.getSelector();
return BaseName.compare(Sel.getAsString()) == 0;
}
/// Matches when at least one of the supplied strings equals
/// Selector.getAsString()
///
///  matcher = objcMessageExpr(hasAnySelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
///
///  matcher = objcMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
assert(!RegExp.empty());
std::string SelectorString = Node.getSelector().getAsString();
llvm::Regex RE(RegExp);
return RE.match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objcMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
///  matcher = objcMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
///  matcher = objcMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
///  matcher = objcMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
InnerMatcher) {
const Expr *ExprNode = Node.getCallee();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
1) {
return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int"))))
/// and friend class X (matcher = friendDecl(hasType("X")))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType,
AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
ValueDecl),
internal::Matcher<QualType>, InnerMatcher, 0) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
return InnerMatcher.matches(QT, Finder, Builder);
return false;
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X")))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType,
AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
CXXBaseSpecifier),
internal::Matcher<Decl>, InnerMatcher, 1) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
return false;
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
if (!Node.getTypeSourceInfo())
// This happens for example for implicit destructors.
return false;
return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
///      cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
QualType, pointsTo, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isAnyPointerType() &&
InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
InnerMatcher, 1) {
return pointsTo(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
InnerMatcher) {
return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isReferenceType() &&
InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
InnerMatcher) {
if (Node.isNull())
return false;
return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
InnerMatcher, 1) {
return references(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and `(g()).m()`, but not `x.g()`.
/// cxxMemberCallExpr(onImplicitObjectArgument(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *ExprNode = Node.getImplicitObjectArgument();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<QualType>, InnerMatcher, 0) {
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<Decl>, InnerMatcher, 1) {
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
InnerMatcher) {
const Decl *DeclNode = Node.getDecl();
return (DeclNode != nullptr &&
InnerMatcher.matches(*DeclNode, Finder, Builder));
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
const NamedDecl *FoundDecl = Node.getFoundDecl();
if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
return InnerMatcher.matches(*UsingDecl, Finder, Builder);
return false;
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
if (Node.isSingleDecl()) {
const Decl *FoundDecl = Node.getSingleDecl();
return InnerMatcher.matches(*FoundDecl, Finder, Builder);
}
return false;
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
VarDecl, hasInitializer, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *Initializer = Node.getAnyInitializer();
return (Initializer != nullptr &&
InnerMatcher.matches(*Initializer, Finder, Builder));
}
/// Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes variables declared at namespace scope and those declared with
/// the "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// \endcode
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr,
ObjCMessageExpr),
unsigned, N) {
return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr,
ObjCMessageExpr),
unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
return (N < Node.getNumArgs() &&
InnerMatcher.matches(
*Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
return N < Node.getNumInits() &&
InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
///   matches 'int a, b = 0;' as well as 'int d = 2, e;',
///   but 'int c;' is not matched.
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
internal::Matcher<Decl>, InnerMatcher) {
const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
if (N >= NumDecls)
return false;
DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
std::advance(Iterator, N);
return InnerMatcher.matches(**Iterator, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
internal::Matcher<FieldDecl>, InnerMatcher) {
const FieldDecl *NodeAsDecl = Node.getAnyMember();
return (NodeAsDecl != nullptr &&
InnerMatcher.matches(*NodeAsDecl, Finder, Builder));
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
internal::Matcher<Expr>, InnerMatcher) {
const Expr* NodeAsExpr = Node.getInit();
return (NodeAsExpr != nullptr &&
InnerMatcher.matches(*NodeAsExpr, Finder, Builder));
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(
CallExpr, CXXConstructExpr,
CXXUnresolvedConstructExpr, ObjCMessageExpr),
internal::Matcher<Expr>, InnerMatcher) {
for (const Expr *Arg : Node.arguments()) {
BoundNodesTreeBuilder Result(*Builder);
if (InnerMatcher.matches(*Arg, Finder, &Result)) {
*Builder = std::move(Result);
return true;
}
}
return false;
}
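/// A minimal sketch that also binds the matching argument (the binding name
/// "arg" is illustrative):
/// \code
///   StatementMatcher M =
///       callExpr(hasAnyArgument(declRefExpr().bind("arg")));
/// \endcode
/// Only the first matching argument contributes bindings, since the matcher
/// stops as soon as one argument matches.
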
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
InnerMatcher, 0) {
for (const LambdaCapture &Capture : Node.captures()) {
if (Capture.capturesVariable()) {
BoundNodesTreeBuilder Result(*Builder);
if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
*Builder = std::move(Result);
return true;
}
}
}
return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
///   };
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
return LC.capturesThis();
});
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
ObjCMethodDecl,
BlockDecl),
unsigned, N, internal::Matcher<ParmVarDecl>,
InnerMatcher) {
return (N < Node.parameters().size()
&& InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr),
internal::Matcher<Expr>, ArgMatcher,
internal::Matcher<ParmVarDecl>, ParamMatcher) {
BoundNodesTreeBuilder Result;
// The first argument of an overloaded member operator is the implicit object
// argument of the method which should not be matched against a parameter, so
// we skip over it here.
BoundNodesTreeBuilder Matches;
unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
.matches(Node, Finder, &Matches)
? 1
: 0;
int ParamIndex = 0;
bool Matched = false;
for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
BoundNodesTreeBuilder ArgMatches(*Builder);
if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
Finder, &ArgMatches)) {
BoundNodesTreeBuilder ParamMatches(ArgMatches);
if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
hasParameter(ParamIndex, ParamMatcher)))),
callExpr(callee(functionDecl(
hasParameter(ParamIndex, ParamMatcher))))))
.matches(Node, Finder, &ParamMatches)) {
Result.addMatch(ParamMatches);
Matched = true;
}
}
++ParamIndex;
}
*Builder = std::move(Result);
return Matched;
}
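/// A hedged sketch of consuming both bindings from a callback (the binding
/// names "arg" and "param" are illustrative):
/// \code
///   StatementMatcher M = callExpr(forEachArgumentWithParam(
///       declRefExpr().bind("arg"), parmVarDecl().bind("param")));
///   // In a MatchFinder::MatchCallback::run override:
///   //   const auto *Arg = Result.Nodes.getNodeAs<DeclRefExpr>("arg");
///   //   const auto *Param = Result.Nodes.getNodeAs<ParmVarDecl>("param");
/// \endcode
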
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(clang::ParmVarDecl, isAtPosition, unsigned, N) {
const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
return false;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasAnyParameter
/// matching y.
///
/// For blocks, given
/// \code
///  b = ^(int y) { printf("%d", y); };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasAnyParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
ObjCMethodDecl,
BlockDecl),
internal::Matcher<ParmVarDecl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType),
unsigned, N) {
return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
///   matches the declarations of f and g, but not the declaration of h.
/// varDecl(isExternC())
///   matches the declarations of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
VarDecl)) {
return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
VarDecl)) {
return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType)) {
if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
return FnTy->hasDynamicExceptionSpec();
return false;
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g and h, but not f, i, or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType)) {
const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
// If the function does not have a prototype, then it is assumed to be a
// throwing function (as it would if the function did not have any exception
// specification).
if (!FnTy)
return false;
// Assume the best for any unresolved exception specification.
if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
return true;
return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr statements.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
FunctionDecl,
IfStmt)) {
return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
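///
/// A brief usage sketch (assuming an \c ASTContext named \c Ctx for the
/// snippet above and the \c match() helper from ASTMatchFinder.h;
/// illustrative only):
/// \code
///   auto Matches = match(
///       ifStmt(hasInitStatement(declStmt().bind("init"))), Ctx);
///   // In foo() above, "init" binds the statement "int i = foobar();".
/// \endcode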
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
CXXForRangeStmt),
internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *Init = Node.getInit();
return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
hasCondition,
AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
SwitchStmt, AbstractConditionalOperator),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *const Condition = Node.getCond();
return (Condition != nullptr &&
InnerMatcher.matches(*Condition, Finder, Builder));
}
/// Matches the then-statement of an if statement.
///
/// Example matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Then = Node.getThen();
return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}
/// Matches the else-statement of an if statement.
///
/// Example matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Else = Node.getElse();
return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
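///
/// A minimal usage sketch (assuming \c clang::tooling::buildASTFromCode and
/// the \c match() helper from ASTMatchFinder.h; illustrative only):
/// \code
///   auto AST = clang::tooling::buildASTFromCode(
///       "class X { int a; int b; };");
///   auto Matches = match(
///       cxxRecordDecl(
///           has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///           has(fieldDecl(hasName("b"),
///                         hasType(type(equalsBoundNode("t")))))),
///       AST->getASTContext());
///   // One match is expected, since both fields have type "int".
/// \endcode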
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
QualType),
std::string, ID) {
// FIXME: Figure out whether it makes sense to allow this
// on any other node types.
// For *Loc it probably does not make sense, as those seem
// unique. For NestedNameSpecifier it might make sense, as
// those also have pointer identity, but I'm not sure whether
// they're ever reused.
internal::NotEqualsBoundNodePredicate Predicate;
Predicate.ID = ID;
Predicate.Node = DynTypedNode::create(Node);
return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
internal::Matcher<DeclStmt>, InnerMatcher) {
const DeclStmt* const DeclarationStatement =
Node.getConditionVariableDeclStmt();
return DeclarationStatement != nullptr &&
InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getIdx())
return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getBase())
return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
WhileStmt,
CXXForRangeStmt,
FunctionDecl),
internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
return (Statement != nullptr &&
InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
StmtExpr),
internal::Matcher<Stmt>, InnerMatcher) {
const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
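///
/// A short usage sketch for the negative-literal case (assuming
/// \c clang::tooling::buildASTFromCode and the \c match() helper from
/// ASTMatchFinder.h; illustrative only):
/// \code
///   auto AST = clang::tooling::buildASTFromCode("int x = -13;");
///   auto Matches = match(
///       unaryOperator(hasOperatorName("-"),
///                     hasUnaryOperand(integerLiteral(equals(13)).bind("l"))),
///       AST->getASTContext());
///   // "l" is expected to bind the literal 13 inside "-13".
/// \endcode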
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
return internal::PolymorphicMatcherWithParam1<
internal::ValueEqualsMatcher,
ValueT>(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
bool, Value, 0) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
unsigned, Value, 1) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
FloatingLiteral,
IntegerLiteral),
double, Value, 2) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
UnaryOperator),
std::string, Name) {
return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
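///
/// For example (a short sketch):
/// \code
///   binaryOperator(hasAnyOperatorName("<", ">"))
/// \endcode
/// matches both comparisons in "a < b || a > b".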
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
StringRef, internal::hasAnyOperatorNameFunc>
hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isAssignmentOp();
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS(expr())))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *LeftHandSide = Node.getLHS();
return (LeftHandSide != nullptr &&
InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS(expr())))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *RightHandSide = Node.getRHS();
return (RightHandSide != nullptr &&
InnerMatcher.matches(*RightHandSide, Finder, Builder));
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
const internal::Matcher<Expr> &InnerMatcher) {
return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
/// 1 + 2 // Match
/// 2 + 1 // Match
/// 1 + 1 // No match
/// 2 + 2 // No match
/// \endcode
inline internal::Matcher<BinaryOperator>
hasOperands(const internal::Matcher<Expr> &Matcher1,
const internal::Matcher<Expr> &Matcher2) {
return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
allOf(hasLHS(Matcher2), hasRHS(Matcher1)));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
internal::Matcher<Expr>, InnerMatcher) {
const Expr * const Operand = Node.getSubExpr();
return (Operand != nullptr &&
InnerMatcher.matches(*Operand, Finder, Builder));
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
OpaqueValueExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *const SubExpression =
internal::GetSourceExpressionMatcher<NodeType>::get(Node);
return (SubExpression != nullptr &&
InnerMatcher.matches(*SubExpression, Finder, Builder));
}
/// Matches casts that have a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, the CastKind parameter should be
/// passed as a quoted string, e.g., hasCastKind("CK_NullToPointer").
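///
/// A clang-query session sketch (illustrative only):
/// \code
///   clang-query> match castExpr(hasCastKind("CK_NullToPointer"))
/// \endcode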
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
const QualType NodeType = Node.getTypeAsWritten();
return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl objects that are spelled with "struct".
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
return Node.isStruct();
}
/// Matches TagDecl objects that are spelled with "union".
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
return Node.isUnion();
}
/// Matches TagDecl objects that are spelled with "class".
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
return Node.isClass();
}
/// Matches TagDecl objects that are spelled with "enum".
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *Expression = Node.getTrueExpr();
return (Expression != nullptr &&
InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *Expression = Node.getFalseExpr();
return (Expression != nullptr &&
InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
ObjCMethodDecl,
FunctionDecl)) {
return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
internal::Matcher<CXXRecordDecl>, InnerMatcher) {
const CXXRecordDecl *Parent = Node.getParent();
return (Parent != nullptr &&
InnerMatcher.matches(*Parent, Finder, Builder));
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
internal::Matcher<CXXMethodDecl>, InnerMatcher) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *Overridden : Node.overridden_methods()) {
BoundNodesTreeBuilder OverriddenBuilder(*Builder);
const bool OverriddenMatched =
InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
if (OverriddenMatched) {
Matched = true;
Result.addMatch(OverriddenBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
/// class A {
/// public:
/// virtual void x(); // matches x
/// };
/// \endcode
///
/// Example:
/// \code
/// class Base {};
/// class DirectlyDerived : virtual Base {}; // matches Base
/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
CXXBaseSpecifier)) {
return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
CXXMethodDecl)) {
return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr)) {
return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
internal::Matcher<ValueDecl>, InnerMatcher) {
return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
hasObjectExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr),
internal::Matcher<Expr>, InnerMatcher) {
if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
internal::Matcher<NamedDecl>, InnerMatcher) {
return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable templates.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDeclaration);
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
functionDecl(isTemplateInstantiation())));
return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
return stmt(
hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable templates.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
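///
/// For example (a short sketch):
/// \code
///   typeLoc(loc(isInteger()))
/// \endcode
/// matches the \c TypeLoc of each occurrence of an integer type.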
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
internal::Matcher<QualType>, InnerMatcher, 0) {
return internal::BindableMatcher<TypeLoc>(
new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
return Node.isVoidType();
}
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void f() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
StringLiteral),
unsigned, N) {
return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType()
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 };
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches type nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 };
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches type nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; };
/// int A::*ptr = &A::i;
/// \endcode
/// memberPointerType()
/// matches "int A::*ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// objcObjectPointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches type nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
return InnerMatcher.matches(*Qualifier, Finder, Builder);
return false;
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
InnerMatcher) {
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
InnerType) {
return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
const DeclContext *DC = Node.getDeclContext();
if (!DC) return false;
return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
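///
/// For example, given the \c A::B::C snippet used below (a short sketch):
/// \code
///   nestedNameSpecifierLoc(loc(specifiesType(asString("struct A"))))
/// \endcode
/// matches the source location of "A::".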
AST_MATCHER_FUNCTION_P_OVERLOAD(
internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
return internal::BindableMatcher<NestedNameSpecifierLoc>(
new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
internal::Matcher<QualType>, InnerMatcher) {
if (!Node.getAsType())
return false;
return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
internal::Matcher<TypeLoc>, InnerMatcher) {
return Node && Node.getNestedNameSpecifier()->getAsType() &&
InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
internal::Matcher<NestedNameSpecifier>, InnerMatcher,
0) {
const NestedNameSpecifier *NextNode = Node.getPrefix();
if (!NextNode)
return false;
return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
1) {
NestedNameSpecifierLoc NextNode = Node.getPrefix();
if (!NextNode)
return false;
return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
internal::Matcher<NamespaceDecl>, InnerMatcher) {
if (!Node.getAsNamespace())
return false;
return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
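///
/// A usage sketch (assuming an \c ASTContext named \c Ctx and the \c match()
/// and \c selectFirst() helpers from ASTMatchFinder.h; illustrative only):
/// \code
///   auto First = match(varDecl(hasName("x")).bind("v"), Ctx);
///   const Decl *D = selectFirst<Decl>("v", First);
///   // decl(equalsNode(D)) now matches exactly that declaration again.
/// \endcode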
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
InnerMatcher) {
BoundNodesTreeBuilder Result;
// FIXME: getSwitchCaseList() does not necessarily guarantee a stable
// iteration order. We should use the more general iterating matchers once
// they are capable of expressing this matcher (for example, it should ignore
// case statements belonging to nested switch statements).
bool Matched = false;
for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
BoundNodesTreeBuilder CaseBuilder(*Builder);
bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
if (CaseMatched) {
Matched = true;
Result.addMatch(CaseBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
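///
/// A consumption sketch (assuming an \c ASTContext named \c Ctx and the
/// \c match() helper; illustrative only):
/// \code
///   auto Matches = match(
///       cxxConstructorDecl(forEachConstructorInitializer(
///           forField(fieldDecl().bind("x")))),
///       Ctx);
///   // Two BoundNodes entries are expected, binding "x" to 'i' and 'j'.
/// \endcode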
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *I : Node.inits()) {
BoundNodesTreeBuilder InitBuilder(*Builder);
if (InnerMatcher.matches(*I, Finder, &InitBuilder)) {
Matched = true;
Result.addMatch(InitBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool); // #7
/// explicit(true) S(char); // #8
/// explicit(b) S(S); // #9
/// };
/// S(int) -> S<true>; // #5
/// explicit S(double) -> S<false>; // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool); // #7
/// explicit(true) S(char); // #8
/// explicit(b) S(S); // #9
/// };
/// S(int) -> S<true>; // #5
/// explicit S(double) -> S<false>; // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
InnerMatcher) {
ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
if (!ES.getExpr())
return false;
return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
FunctionDecl)) {
// This is required because the spelling of the function used to determine
// whether inline is specified or not differs between the polymorphic types.
if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
return FD->isInlineSpecified();
else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
return NSD->isInline();
llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
InnerMatcher) {
if (Node.getRHS())
return false;
return InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, the attr::Kind parameter should
/// be passed as a quoted string, e.g. hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
for (const auto *Attr : Node.attrs()) {
if (Attr->getKind() == AttrKind)
return true;
}
return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
InnerMatcher) {
if (const auto *RetValue = Node.getRetValue())
return InnerMatcher.matches(*RetValue, Finder, Builder);
return false;
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
return Node.isNullPointerConstant(Finder->getASTContext(),
Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
InnerMatcher) {
const auto &Parents = Finder->getASTContext().getParents(Node);
llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
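// Walk up the parent map with an explicit worklist: succeed at the first
// enclosing FunctionDecl (or lambda call operator) accepted by InnerMatcher;
// any other kind of node just pushes its own parents onto the stack.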
while(!Stack.empty()) {
const auto &CurNode = Stack.back();
Stack.pop_back();
if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
return true;
}
} else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
Finder, Builder)) {
return true;
}
} else {
for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
Stack.push_back(Parent);
}
}
return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
internal::Matcher<Expr>, InnerMatcher) {
return Node.getNumPlacementArgs() > Index &&
InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
InnerMatcher) {
return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
return InnerMatcher.matches(*Arg, Finder, Builder);
});
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
return Node.isArray() && *Node.getArraySize() &&
InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
return F->hasTrailingReturn();
return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
// E tracks the node that we are examining.
const Expr *E = &Node;
// If present, remove an outer `ExprWithCleanups` corresponding to the
// underlying `CXXConstructExpr`. This check won't cover all cases of added
// `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
// EWC is placed on the outermost node of the expression, which this may not
// be), but, it still improves the coverage of this matcher.
if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
E = CleanupsExpr->getSubExpr();
if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
if (CtorExpr->isElidable()) {
if (const auto *MaterializeTemp =
dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
Builder);
}
}
}
return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective())`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be a standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
internal::Matcher<Stmt>, InnerMatcher) {
if (Node.isStandaloneDirective())
return false; // Standalone directives have no structured blocks.
return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
internal::Matcher<OMPClause>, InnerMatcher) {
ArrayRef<OMPClause *> Clauses = Node.clauses();
return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string, e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default")``.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
OpenMPClauseKind, CKind) {
return llvm::omp::isAllowedClauseForDirective(
Node.getDirectiveKind(), CKind,
Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE (56 / 2) // 28 bytes
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 128
static struct fmt_tests SybasePROP_tests[] = {
{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
{NULL}
};
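/*
 * Ciphertext layout, inferred from the constants above and from get_salt()
 * and get_binary() below (an educated sketch, not an official spec):
 *
 *   0x 29 05 aeb3d00e...dcb06
 *   |  |  |  |
 *   |  |  |  +-- 56 hex chars -> BINARY_SIZE (28) bytes of FEAL-8 output
 *   |  |  +----- two hex chars that always seem to be "05", skipped
 *   |  +-------- one-byte salt, the "seed" argument of generate_hash()
 *   +----------- PREFIX_VALUE
 */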
static unsigned char saved_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
if (omp_t > 1) {
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
}
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext + PREFIX_LENGTH;
int extra;
if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH))
return 0;
if (hexlenl(p, &extra) != CIPHERTEXT_LENGTH-PREFIX_LENGTH || extra)
return 0;
return 1;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void *get_salt(char *ciphertext)
{
char *p = ciphertext + PREFIX_LENGTH;
static unsigned char salt;
salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
return (void*)&salt;
}
static void set_salt(void *salt)
{
saved_salt = ((unsigned char*)salt)[0];
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
generate_hash((unsigned char*)saved_key[index], saved_salt,
(unsigned char*)crypt_out[index]);
}
return count;
}
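/* Quick rejection: compare only the first machine word (ARCH_SIZE bytes) of
 * each computed hash; cmp_one() below re-checks the full BINARY_SIZE. */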
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
struct fmt_main fmt_sybaseprop = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ PREFIX_VALUE },
SybasePROP_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
fc_kernel_fp16_arm82.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: xlchen@openailab.com
*/
#include "fc_kernel_fp16_arm82.h"
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <arm_neon.h>
void hgemv_1x8_a55(__fp16* biases, __fp16* input, __fp16* kernel, long kernel_size, __fp16* output);
void hgemv_1x2_a55(__fp16* biases, __fp16* input, __fp16* kernel, long kernel_size, __fp16* output);
// start and end channel must be multiples of 8
void hgemv1x8(const __fp16* input, const __fp16* output, __fp16* weight_interleaved, const __fp16* biases,
int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity)
{
int ch = 0;
__fp16 *cur_kernel, *cur_biases, *cur_result;
// #pragma omp parallel for num_threads(num_thread)
for(ch = start_channel; ch < end_channel; ch += 8)
{
cur_kernel = ( __fp16* )(weight_interleaved + kernel_size * ch);
cur_result = ( __fp16* )(output + ch);
cur_biases = biases ? ( __fp16* )(biases + ch) : NULL;
hgemv_1x8_a55(cur_biases, ( __fp16* )input, cur_kernel, kernel_size, cur_result); // TODO: add an A76-tuned variant
}
}
// start channel must be a multiple of 2
void hgemv1x2(const __fp16* input, const __fp16* output, __fp16* weight_interleaved, const __fp16* biases,
int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity)
{
__fp16 sum;
int ch = 0;
__fp16 *cur_kernel, *cur_biases, *cur_result;
for(ch = start_channel; ch < (end_channel & -2); ch += 2)
{
cur_kernel = ( __fp16* )(weight_interleaved + kernel_size * ch);
cur_result = ( __fp16* )(output + ch);
cur_biases = biases ? ( __fp16* )(biases + ch) : NULL;
hgemv_1x2_a55(cur_biases, ( __fp16* )input, cur_kernel, kernel_size, cur_result);
}
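// Tail case: a final odd output channel falls back to a plain scalar dot
// product, since the assembly kernel processes channels in pairs.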
if(end_channel & 0x1)
{
cur_kernel = ( __fp16* )(weight_interleaved + kernel_size * ch);
cur_result = ( __fp16* )(output + ch);
sum = biases ? *(biases + ch) : 0.f;
for(int j = 0; j < kernel_size; j++)
sum = sum + input[j] * cur_kernel[j];
*cur_result = sum;
}
}
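/*
 * Weight layout sketch (inferred from the loops below): rows of the
 * row-major [out_chan x kernel_size] weight matrix are regrouped column by
 * column so the assembly kernels can load consecutive output channels with
 * one vector load. For an 8-row group r0..r7:
 *
 *   src: r0[0] r0[1] ... | r1[0] r1[1] ... | ... | r7[0] r7[1] ...
 *   dst: r0[0] r1[0] ... r7[0] | r0[1] r1[1] ... r7[1] | ...
 *
 * Leftover rows are interleaved in pairs, and a final odd row is copied
 * verbatim.
 */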
static void interleave_kernel(const __fp16* kernel, __fp16* kernel_interleaved, int out_chan, int kernel_size)
{
int i, j, k;
__fp16* cur_kernel[8];
__fp16* cur_kernel_interleaved;
// interleave 8 kernel
for(i = 0; i < (out_chan & -8); i += 8)
{
for(j = 0; j < 8; j++)
cur_kernel[j] = ( __fp16* )kernel + kernel_size * (i + j);
cur_kernel_interleaved = ( __fp16* )kernel_interleaved + kernel_size * i;
for(k = 0; k < kernel_size; k++)
for(j = 0; j < 8; j++)
cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k);
}
// interleave 2 kernel
for(; i < (out_chan & -2); i += 2)
{
for(j = 0; j < 2; j++)
cur_kernel[j] = ( __fp16* )kernel + kernel_size * (i + j);
cur_kernel_interleaved = ( __fp16* )kernel_interleaved + kernel_size * i;
for(k = 0; k < kernel_size; k++)
for(j = 0; j < 2; j++)
cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k);
}
// copy last kernel
if(out_chan & 0x1)
{
cur_kernel[0] = ( __fp16* )kernel + kernel_size * i;
cur_kernel_interleaved = ( __fp16* )kernel_interleaved + kernel_size * i;
for(k = 0; k < kernel_size; k++)
cur_kernel_interleaved[k] = *(cur_kernel[0] + k);
}
return;
}
int fp16_fc_kernel_prerun(struct tensor* input_tensor , \
struct tensor* filter_tensor , \
struct tensor* output_tensor , \
struct fc_priv_info* priv_info , \
struct fc_param* param)
{
int num_output = param->num_output;
int kernel_size = filter_tensor->dims[1];
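// Round kernel_size up to an even value; the buffers below are padded to
// this length so the paired fp16 loads in the assembly kernels cannot
// overrun (an inference from the 1x2/1x8 kernels above).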
int kernel_align = ((kernel_size + 1) & -2);
if (!priv_info->interleave_buffer)
{
int mem_size = sizeof(__fp16) * num_output * kernel_align;
void* mem = sys_malloc(mem_size);
priv_info->interleave_buffer = mem;
priv_info->interleave_buffer_size = mem_size;
}
if (!priv_info->input_buffer)
{
int mem_size = sizeof(__fp16) * kernel_align;
void* mem = sys_malloc(mem_size);
priv_info->input_buffer = mem;
priv_info->input_buffer_size = mem_size;
}
__fp16* filter_data = (__fp16*)filter_tensor->data;
interleave_kernel(filter_data, (__fp16*)priv_info->interleave_buffer, num_output, kernel_size);
return 0;
}
int fp16_fc_kernel_run(struct tensor* input_tensor , \
struct tensor* filter_tensor , \
struct tensor* bias_tensor , \
struct tensor* output_tensor , \
struct fc_priv_info* priv_info , \
struct fc_param* param, \
int num_thread, int cpu_affinity)
{
int out_num = param->num_output;
int kernel_size = filter_tensor->dims[1];
__fp16* input = (__fp16*)input_tensor->data;
__fp16* output = (__fp16*)output_tensor->data;
__fp16* weight = (__fp16*)priv_info->interleave_buffer;
__fp16* biases = NULL;
if (bias_tensor)
biases = (__fp16*)bias_tensor->data;
int out_num_8 = out_num & ~7;
for(int i = 0; i < input_tensor->dims[0]; i++)
{
__fp16* cur_input = input + i * kernel_size;
__fp16* cur_output = output + i * out_num;
hgemv1x8(cur_input, cur_output, weight, biases, kernel_size, 0, out_num_8, num_thread, cpu_affinity);
if(out_num & 0x7)
hgemv1x2(cur_input, cur_output, weight, biases, kernel_size, out_num_8, out_num, num_thread, cpu_affinity);
}
return 0;
}
|
GB_binop__isgt_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int32)
// A*D function (colscale): GB (_AxD__isgt_int32)
// D*A function (rowscale): GB (_DxB__isgt_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int32)
// C=scalar+B GB (_bind1st__isgt_int32)
// C=scalar+B' GB (_bind1st_tran__isgt_int32)
// C=A+scalar GB (_bind2nd__isgt_int32)
// C=A'+scalar GB (_bind2nd_tran__isgt_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
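// Note: the IS* comparison operators (here ISGT) return the comparison
// result in the operand type (int32_t 1 or 0) instead of bool, which is
// what distinguishes ISGT from the boolean-valued GT operator.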
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isgt_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
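// GBB(Bb,p) is 1 when entry p is present (or when B has no bitmap), and
// GBX(Bx,p,iso) reads Bx [p], collapsing to Bx [0] for iso-valued inputs.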
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
TinyDFT_typedef.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <libgen.h>
#include <float.h>
#include <time.h>
#include <omp.h>
#ifdef USE_LIBXC
#include <xc.h>
#endif
#include "libCMS.h"
#include "utils.h"
#include "TinyDFT_typedef.h"
#include "build_HF_mat.h"
#include "build_Dmat.h"
#include "CDIIS.h"
// Compute screening value of each shell pair and find all
// unique shell pairs that survive Schwarz screening
// Input parameter:
// TinyDFT : Initialized TinyDFT structure
// Output parameters:
// TinyDFT : TinyDFT structure with screening info
static void TinyDFT_screen_shell_quartets(TinyDFT_p TinyDFT);
void TinyDFT_init(TinyDFT_p *TinyDFT_, char *bas_fname, char *xyz_fname)
{
TinyDFT_p TinyDFT = (TinyDFT_p) malloc(sizeof(struct TinyDFT_struct));
assert(TinyDFT != NULL);
double st = get_wtime_sec();
TinyDFT->nthread = omp_get_max_threads();
// Reset statistic info
TinyDFT->mem_size = 0.0;
TinyDFT->init_time = 0.0;
TinyDFT->S_Hcore_time = 0.0;
TinyDFT->shell_scr_time = 0.0;
// Load basis set and molecule from input
CMS_createBasisSet(&(TinyDFT->basis));
CMS_loadChemicalSystem(TinyDFT->basis, bas_fname, xyz_fname);
int maxAM = CMS_getMaxMomentum(TinyDFT->basis);
TinyDFT->bas_name = basename(bas_fname);
TinyDFT->mol_name = basename(xyz_fname);
TinyDFT->natom = CMS_getNumAtoms (TinyDFT->basis);
TinyDFT->nshell = CMS_getNumShells (TinyDFT->basis);
TinyDFT->nbf = CMS_getNumFuncs (TinyDFT->basis);
TinyDFT->n_occ = CMS_getNumOccOrb (TinyDFT->basis);
TinyDFT->charge = CMS_getTotalCharge(TinyDFT->basis);
TinyDFT->electron = CMS_getNneutral (TinyDFT->basis);
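// num_total_sp counts ordered shell pairs; num_valid_sp starts from the
// nshell*(nshell+1)/2 unique unordered pairs and is reduced later by
// Schwarz screening in TinyDFT_screen_shell_quartets()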
TinyDFT->num_total_sp = TinyDFT->nshell * TinyDFT->nshell;
TinyDFT->num_valid_sp = (TinyDFT->nshell + 1) * TinyDFT->nshell / 2;
TinyDFT->mat_size = TinyDFT->nbf * TinyDFT->nbf;
TinyDFT->max_dim = (maxAM + 1) * (maxAM + 2) / 2;
TinyDFT->prim_scrtol = 1e-14;
TinyDFT->shell_scrtol2 = 1e-11 * 1e-11;
TinyDFT->E_nuc_rep = CMS_getNucEnergy(TinyDFT->basis);
printf("Job information:\n");
printf(" basis set = %s\n", TinyDFT->bas_name);
printf(" molecule = %s\n", TinyDFT->mol_name);
printf(" atoms = %d\n", TinyDFT->natom);
printf(" shells = %d\n", TinyDFT->nshell);
printf(" basis functions = %d\n", TinyDFT->nbf);
printf(" occupied orbits = %d\n", TinyDFT->n_occ);
printf(" charge = %d\n", TinyDFT->charge);
printf(" electrons = %d\n", TinyDFT->electron);
int nthread = TinyDFT->nthread;
int nshell = TinyDFT->nshell;
int nbf = TinyDFT->nbf;
int n_occ = TinyDFT->n_occ;
int num_total_sp = TinyDFT->num_total_sp;
int num_valid_sp = TinyDFT->num_valid_sp;
// Allocate memory for ERI info arrays for direct approach
CMS_Simint_init(TinyDFT->basis, &(TinyDFT->simint), nthread, TinyDFT->prim_scrtol);
TinyDFT->valid_sp_lid = (int*) malloc_aligned(INT_MSIZE * num_valid_sp, 64);
TinyDFT->valid_sp_rid = (int*) malloc_aligned(INT_MSIZE * num_valid_sp, 64);
TinyDFT->shell_bf_sind = (int*) malloc_aligned(INT_MSIZE * (nshell + 1), 64);
TinyDFT->shell_bf_num = (int*) malloc_aligned(INT_MSIZE * nshell, 64);
TinyDFT->sp_scrval = (double*) malloc_aligned(DBL_MSIZE * num_total_sp, 64);
TinyDFT->bf_pair_scrval = (double*) malloc_aligned(DBL_MSIZE * nbf * nbf, 64);
assert(TinyDFT->valid_sp_lid != NULL);
assert(TinyDFT->valid_sp_rid != NULL);
assert(TinyDFT->shell_bf_sind != NULL);
assert(TinyDFT->shell_bf_num != NULL);
assert(TinyDFT->sp_scrval != NULL);
TinyDFT->mem_size += (double) (INT_MSIZE * 2 * TinyDFT->num_valid_sp);
TinyDFT->mem_size += (double) (INT_MSIZE * (2 * nshell + 1));
TinyDFT->mem_size += (double) (DBL_MSIZE * num_total_sp);
TinyDFT->mem_size += (double) (DBL_MSIZE * nbf * nbf);
for (int i = 0; i < nshell; i++)
{
TinyDFT->shell_bf_sind[i] = CMS_getFuncStartInd(TinyDFT->basis, i);
TinyDFT->shell_bf_num[i] = CMS_getShellDim (TinyDFT->basis, i);
}
TinyDFT->shell_bf_sind[nshell] = nbf;
// Molecular system and ERI info for density fitting will
// be allocated later if needed
TinyDFT->df_shell_bf_sind = NULL;
TinyDFT->df_shell_bf_num = NULL;
TinyDFT->bf_pair_mask = NULL;
TinyDFT->bf_pair_j = NULL;
TinyDFT->bf_pair_diag = NULL;
TinyDFT->bf_mask_displs = NULL;
TinyDFT->df_sp_scrval = NULL;
TinyDFT->df_basis = NULL;
// Flattened Gaussian basis function and atom info used only
// in XC calculation will be allocated if needed
TinyDFT->atom_idx = NULL;
TinyDFT->bf_nprim = NULL;
TinyDFT->atom_xyz = NULL;
TinyDFT->bf_coef = NULL;
TinyDFT->bf_alpha = NULL;
TinyDFT->bf_exp = NULL;
TinyDFT->bf_center = NULL;
// Allocate memory for matrices and arrays used only in build_HF_mat
size_t mat_msize = DBL_MSIZE * TinyDFT->mat_size;
size_t MN_strip_msize = DBL_MSIZE * TinyDFT->max_dim * nbf;
size_t max_buf_entry_size = TinyDFT->max_dim * TinyDFT->max_dim;
size_t total_buf_size = max_buf_entry_size * 6 * nthread;
TinyDFT->max_JKacc_buf = max_buf_entry_size * 6;
TinyDFT->blk_mat_ptr = (int*) malloc_aligned(INT_MSIZE * TinyDFT->num_total_sp, 64);
TinyDFT->Mpair_flag = (int*) malloc_aligned(INT_MSIZE * nshell * nthread, 64);
TinyDFT->Npair_flag = (int*) malloc_aligned(INT_MSIZE * nshell * nthread, 64);
TinyDFT->J_blk_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->K_blk_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->D_blk_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->JKacc_buf = (double*) malloc_aligned(DBL_MSIZE * total_buf_size, 64);
TinyDFT->FM_strip_buf = (double*) malloc_aligned(MN_strip_msize * nthread, 64);
TinyDFT->FN_strip_buf = (double*) malloc_aligned(MN_strip_msize * nthread, 64);
assert(TinyDFT->blk_mat_ptr != NULL);
assert(TinyDFT->Mpair_flag != NULL);
assert(TinyDFT->Npair_flag != NULL);
assert(TinyDFT->J_blk_mat != NULL);
assert(TinyDFT->K_blk_mat != NULL);
assert(TinyDFT->D_blk_mat != NULL);
assert(TinyDFT->JKacc_buf != NULL);
assert(TinyDFT->FM_strip_buf != NULL);
assert(TinyDFT->FN_strip_buf != NULL);
TinyDFT->mem_size += (double) (INT_MSIZE * TinyDFT->num_total_sp);
TinyDFT->mem_size += (double) (2 * INT_MSIZE * nshell * nthread);
TinyDFT->mem_size += (double) (3 * mat_msize);
TinyDFT->mem_size += (double) (2 * MN_strip_msize * nthread);
TinyDFT->mem_size += (double) (DBL_MSIZE * total_buf_size);
int pos = 0, idx = 0;
for (int i = 0; i < nshell; i++)
{
for (int j = 0; j < nshell; j++)
{
TinyDFT->blk_mat_ptr[idx] = pos;
pos += TinyDFT->shell_bf_num[i] * TinyDFT->shell_bf_num[j];
idx++;
}
}
// Matrices and arrays used in XC functional calculation will
// be allocated later if needed
TinyDFT->int_grid = NULL;
TinyDFT->phi = NULL;
TinyDFT->rho = NULL;
TinyDFT->exc = NULL;
TinyDFT->vxc = NULL;
TinyDFT->vsigma = NULL;
TinyDFT->XC_workbuf = NULL;
TinyDFT->xf_impl = 1;
TinyDFT->cf_impl = 1;
// Allocate memory for matrices used in multiple modules
TinyDFT->tmp_mat = (double*) malloc_aligned(mat_msize, 64);
assert(TinyDFT->tmp_mat != NULL);
TinyDFT->mem_size += (double) (mat_msize);
// Allocate memory for matrices and arrays used only in build_Dmat
TinyDFT->ev_idx = (int*) malloc_aligned(INT_MSIZE * nbf, 64);
TinyDFT->eigval = (double*) malloc_aligned(DBL_MSIZE * nbf, 64);
assert(TinyDFT->ev_idx != NULL);
assert(TinyDFT->eigval != NULL);
TinyDFT->mem_size += (double) ((DBL_MSIZE + INT_MSIZE) * nbf);
// Allocate memory for matrices and arrays used only in CDIIS
int MAX_DIIS_1 = MAX_DIIS + 1;
size_t DIIS_row_msize = DBL_MSIZE * MAX_DIIS_1;
TinyDFT->F0_mat = (double*) malloc_aligned(mat_msize * MAX_DIIS, 64);
TinyDFT->R_mat = (double*) malloc_aligned(mat_msize * MAX_DIIS, 64);
TinyDFT->B_mat = (double*) malloc_aligned(DIIS_row_msize * MAX_DIIS_1, 64);
TinyDFT->FDS_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->DIIS_rhs = (double*) malloc_aligned(DIIS_row_msize, 64);
TinyDFT->DIIS_ipiv = (int*) malloc_aligned(INT_MSIZE * MAX_DIIS_1, 64);
assert(TinyDFT->F0_mat != NULL);
assert(TinyDFT->R_mat != NULL);
assert(TinyDFT->B_mat != NULL);
assert(TinyDFT->DIIS_rhs != NULL);
assert(TinyDFT->DIIS_ipiv != NULL);
TinyDFT->mem_size += MAX_DIIS * 2 * (double) mat_msize;
TinyDFT->mem_size += (double) DIIS_row_msize * (MAX_DIIS + 2);
TinyDFT->mem_size += (double) (INT_MSIZE * MAX_DIIS_1);
TinyDFT->mem_size += (double) mat_msize;
// Must initialize F0 and R as 0
memset(TinyDFT->F0_mat, 0, mat_msize * MAX_DIIS);
memset(TinyDFT->R_mat, 0, mat_msize * MAX_DIIS);
TinyDFT->DIIS_len = 0;
// Initialize B_mat
for (int i = 0; i < MAX_DIIS_1 * MAX_DIIS_1; i++) TinyDFT->B_mat[i] = -1.0;
for (int i = 0; i < MAX_DIIS_1; i++) TinyDFT->B_mat[i * MAX_DIIS_1 + i] = 0.0;
TinyDFT->DIIS_bmax_id = 0;
TinyDFT->DIIS_bmax = -DBL_MAX;
// Allocate memory for matrices and arrays used only in SCF iterations
TinyDFT->E_tol = 1e-10;
TinyDFT->Hcore_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->S_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->X_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->J_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->K_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->XC_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->F_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->D_mat = (double*) malloc_aligned(mat_msize, 64);
TinyDFT->Cocc_mat = (double*) malloc_aligned(DBL_MSIZE * n_occ * nbf, 64);
assert(TinyDFT->Hcore_mat != NULL);
assert(TinyDFT->S_mat != NULL);
assert(TinyDFT->X_mat != NULL);
assert(TinyDFT->J_mat != NULL);
assert(TinyDFT->K_mat != NULL);
assert(TinyDFT->XC_mat != NULL);
assert(TinyDFT->F_mat != NULL);
assert(TinyDFT->D_mat != NULL);
assert(TinyDFT->Cocc_mat != NULL);
TinyDFT->mem_size += (double) (8 * mat_msize);
TinyDFT->mem_size += (double) (DBL_MSIZE * n_occ * nbf);
memset(TinyDFT->Cocc_mat, 0, DBL_MSIZE * n_occ * nbf);
// Tensors and matrices used only in build_JKDF will
// be allocated later if needed
TinyDFT->mat_K_m = NULL;
TinyDFT->mat_K_n = NULL;
TinyDFT->mat_K_k = NULL;
TinyDFT->mat_K_lda = NULL;
TinyDFT->mat_K_ldb = NULL;
TinyDFT->mat_K_ldc = NULL;
TinyDFT->mat_K_beta = NULL;
TinyDFT->mat_K_alpha = NULL;
TinyDFT->pqA = NULL;
TinyDFT->Jpq = NULL;
TinyDFT->df_tensor = NULL;
TinyDFT->temp_J = NULL;
TinyDFT->temp_K = NULL;
TinyDFT->mat_K_a = NULL;
TinyDFT->mat_K_b = NULL;
TinyDFT->mat_K_c = NULL;
TinyDFT->mat_K_transa = NULL;
TinyDFT->mat_K_transb = NULL;
double et = get_wtime_sec();
TinyDFT->init_time = et - st;
// Print memory usage and time consumption
printf("TinyDFT memory allocation and initialization over, elapsed time = %.3lf (s)\n", TinyDFT->init_time);
TinyDFT_screen_shell_quartets(TinyDFT);
*TinyDFT_ = TinyDFT;
}
void TinyDFT_destroy(TinyDFT_p *_TinyDFT)
{
TinyDFT_p TinyDFT = *_TinyDFT;
assert(TinyDFT != NULL);
printf("TinyDFT total memory usage = %.2lf MB\n", TinyDFT->mem_size / 1048576.0);
// Free ERI info arrays for direct approach
free_aligned(TinyDFT->valid_sp_lid);
free_aligned(TinyDFT->valid_sp_rid);
free_aligned(TinyDFT->shell_bf_sind);
free_aligned(TinyDFT->shell_bf_num);
free_aligned(TinyDFT->sp_scrval);
free_aligned(TinyDFT->bf_pair_scrval);
// Free ERI info arrays for density fitting
free_aligned(TinyDFT->df_shell_bf_sind);
free_aligned(TinyDFT->df_shell_bf_num);
free_aligned(TinyDFT->bf_pair_mask);
free_aligned(TinyDFT->bf_pair_j);
free_aligned(TinyDFT->bf_pair_diag);
free_aligned(TinyDFT->bf_mask_displs);
free_aligned(TinyDFT->df_sp_scrval);
// Free flattened Gaussian basis function and atom info used only
// in XC calculation
free_aligned(TinyDFT->atom_idx);
free_aligned(TinyDFT->bf_nprim);
free_aligned(TinyDFT->atom_xyz);
free_aligned(TinyDFT->bf_coef);
free_aligned(TinyDFT->bf_alpha);
free_aligned(TinyDFT->bf_exp);
free_aligned(TinyDFT->bf_center);
// Free matrices and temporary arrays used only in build_HF_mat
free_aligned(TinyDFT->blk_mat_ptr);
free_aligned(TinyDFT->Mpair_flag);
free_aligned(TinyDFT->Npair_flag);
free_aligned(TinyDFT->J_blk_mat);
free_aligned(TinyDFT->K_blk_mat);
free_aligned(TinyDFT->D_blk_mat);
free_aligned(TinyDFT->JKacc_buf);
free_aligned(TinyDFT->FM_strip_buf);
free_aligned(TinyDFT->FN_strip_buf);
// Free matrices and arrays used in XC functional calculation
free(TinyDFT->int_grid);
free_aligned(TinyDFT->phi);
free_aligned(TinyDFT->rho);
free_aligned(TinyDFT->exc);
free_aligned(TinyDFT->vxc);
free_aligned(TinyDFT->vsigma);
free_aligned(TinyDFT->XC_workbuf);
#ifdef USE_LIBXC
if (TinyDFT->xf_impl == 0) xc_func_end(&TinyDFT->libxc_xf);
if (TinyDFT->cf_impl == 0) xc_func_end(&TinyDFT->libxc_cf);
#endif
// Free matrices used in multiple modules
free_aligned(TinyDFT->tmp_mat);
// Free matrices and arrays used only in build_Dmat
free_aligned(TinyDFT->ev_idx);
free_aligned(TinyDFT->eigval);
// Free matrices and temporary arrays used only in CDIIS
free_aligned(TinyDFT->F0_mat);
free_aligned(TinyDFT->R_mat);
free_aligned(TinyDFT->B_mat);
free_aligned(TinyDFT->FDS_mat);
free_aligned(TinyDFT->DIIS_rhs);
free_aligned(TinyDFT->DIIS_ipiv);
// Free matrices and temporary arrays used only in SCF
free_aligned(TinyDFT->Hcore_mat);
free_aligned(TinyDFT->S_mat);
free_aligned(TinyDFT->F_mat);
free_aligned(TinyDFT->D_mat);
free_aligned(TinyDFT->J_mat);
free_aligned(TinyDFT->K_mat);
free_aligned(TinyDFT->X_mat);
free_aligned(TinyDFT->Cocc_mat);
// Free Tensors and matrices used only in build_JKDF
free(TinyDFT->mat_K_m);
free(TinyDFT->mat_K_n);
free(TinyDFT->mat_K_k);
free(TinyDFT->mat_K_lda);
free(TinyDFT->mat_K_ldb);
free(TinyDFT->mat_K_ldc);
free(TinyDFT->mat_K_beta);
free(TinyDFT->mat_K_alpha);
free_aligned(TinyDFT->pqA);
free_aligned(TinyDFT->Jpq);
free_aligned(TinyDFT->df_tensor);
free_aligned(TinyDFT->temp_J);
free_aligned(TinyDFT->temp_K);
free(TinyDFT->mat_K_a);
free(TinyDFT->mat_K_b);
free(TinyDFT->mat_K_c);
free(TinyDFT->mat_K_transa);
free(TinyDFT->mat_K_transb);
// Free BasisSet_t and Simint_t object, print Simint_t object stat info
CMS_destroyBasisSet(TinyDFT->basis);
CMS_Simint_destroy(TinyDFT->simint, 1);
free(TinyDFT);
*_TinyDFT = NULL;
}
static int cmp_pair(int M1, int N1, int M2, int N2)
{
if (M1 == M2) return (N1 < N2);
else return (M1 < M2);
}
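// Sort shell pairs lexicographically by (M, N) so pairs sharing the same M
// stay contiguous, matching the "(M, N_i) as continuous as possible" goal
// noted in TinyDFT_screen_shell_quartets() below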
static void quickSort(int *M, int *N, int l, int r)
{
int i = l, j = r, tmp;
int mid_M = M[(i + j) / 2];
int mid_N = N[(i + j) / 2];
while (i <= j)
{
while (cmp_pair(M[i], N[i], mid_M, mid_N)) i++;
while (cmp_pair(mid_M, mid_N, M[j], N[j])) j--;
if (i <= j)
{
tmp = M[i]; M[i] = M[j]; M[j] = tmp;
tmp = N[i]; N[i] = N[j]; N[j] = tmp;
i++; j--;
}
}
if (i < r) quickSort(M, N, i, r);
if (j > l) quickSort(M, N, l, j);
}
static void TinyDFT_screen_shell_quartets(TinyDFT_p TinyDFT)
{
assert(TinyDFT != NULL);
int nshell = TinyDFT->nshell;
int nbf = TinyDFT->nbf;
int *shell_bf_num = TinyDFT->shell_bf_num;
int *shell_bf_sind = TinyDFT->shell_bf_sind;
int *valid_sp_lid = TinyDFT->valid_sp_lid;
int *valid_sp_rid = TinyDFT->valid_sp_rid;
double shell_scrtol2 = TinyDFT->shell_scrtol2;
double *sp_scrval = TinyDFT->sp_scrval;
double *bf_pair_scrval = TinyDFT->bf_pair_scrval;
Simint_p simint = TinyDFT->simint;
double st = get_wtime_sec();
// Compute screening values using Schwarz inequality
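// Schwarz inequality: |(MN|PQ)|^2 <= (MN|MN) * (PQ|PQ), so the largest
// diagonal ERI of each (M,N) shell pair bounds every quartet containing it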
double global_max_scrval = 0.0;
#pragma omp parallel
{
int tid = omp_get_thread_num();
void *thread_MN_sp;
CMS_Simint_create_multi_sp(&thread_MN_sp);
#pragma omp for schedule(dynamic) reduction(max:global_max_scrval)
for (int M = 0; M < nshell; M++)
{
int dimM = shell_bf_num[M];
int M_bf_idx = shell_bf_sind[M];
for (int N = 0; N < nshell; N++)
{
int dimN = shell_bf_num[N];
int N_bf_idx = shell_bf_sind[N];
int nint;
double *eri;
CMS_Simint_calc_MNMN_shellquartet(simint, tid, M, N, &thread_MN_sp, &eri, &nint);
double maxval = 0.0;
if (nint > 0)
{
// Loop over all ERIs in a shell quartet and find the max value
for (int iM = 0; iM < dimM; iM++)
{
for (int iN = 0; iN < dimN; iN++)
{
int index = iN * (dimM * dimN * dimM + dimM) + iM * (dimN * dimM + 1); // Simint layout: the (iN,iM,iN,iM) element, assuming row-major (NM|NM) quartet ordering
double val = fabs(eri[index]);
int bf_idx = (M_bf_idx + iM) * nbf + (N_bf_idx + iN);
bf_pair_scrval[bf_idx] = val;
if (val > maxval) maxval = val;
}
}
} else {
for (int iM = 0; iM < dimM; iM++)
for (int iN = 0; iN < dimN; iN++)
{
int bf_idx = (M_bf_idx + iM) * nbf + (N_bf_idx + iN);
bf_pair_scrval[bf_idx] = 0.0;
}
}
sp_scrval[M * nshell + N] = maxval;
if (maxval > global_max_scrval) global_max_scrval = maxval;
} // End of "for (int N = 0; N < nshell; N++)"
} // End of "for (int M = 0; M < nshell; M++)"
CMS_Simint_free_multi_sp(thread_MN_sp);
} // End of "#pragma omp parallel"
// Reset Simint statistic info
CMS_Simint_reset_stat_info(simint);
// Generate unique shell pairs that survive Schwarz screening
// eta is the threshold for screening a shell pair
double eta = shell_scrtol2 / global_max_scrval;
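// Why eta works (a sketch, assuming sp_scrval[M*nshell+N] = max |(MN|MN)|):
// by the Cauchy-Schwarz inequality, |(MN|PQ)|^2 <= (MN|MN) * (PQ|PQ), so
// sp_scrval * global_max_scrval < shell_scrtol2 implies
// |(MN|PQ)| < sqrt(shell_scrtol2) for every shell pair (P,Q).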
int num_valid_sp = 0;
for (int M = 0; M < nshell; M++)
{
for (int N = 0; N < nshell; N++)
{
double MN_scrval = sp_scrval[M * nshell + N];
// If sp_scrval * max_scrval < shell_scrtol2, then for any given shell pair
// (P,Q), (MN|PQ)^2 is always < shell_scrtol2, so (M,N) will be screened
if (MN_scrval > eta)
{
// Make {N_i} in (M, N_i) as contiguous as possible to get a better
// memory access pattern and better performance
if (N > M) continue;
// We want AM(M) >= AM(N) to avoid HRR
int MN_id = CMS_Simint_get_sp_AM_idx(simint, M, N);
int NM_id = CMS_Simint_get_sp_AM_idx(simint, N, M);
if (MN_id > NM_id)
{
valid_sp_lid[num_valid_sp] = M;
valid_sp_rid[num_valid_sp] = N;
} else {
valid_sp_lid[num_valid_sp] = N;
valid_sp_rid[num_valid_sp] = M;
}
num_valid_sp++;
}
}
}
TinyDFT->num_valid_sp = num_valid_sp;
quickSort(valid_sp_lid, valid_sp_rid, 0, num_valid_sp - 1);
// Create Simint shell pair structures for unique screened shell pairs
CMS_Simint_create_uniq_scr_sp(simint, num_valid_sp, valid_sp_lid, valid_sp_rid);
double et = get_wtime_sec();
TinyDFT->shell_scr_time = et - st;
// Print runtime
int num_total_sp = TinyDFT->num_total_sp;
printf(
"TinyDFT shell pair screening over, tol = %.2e, elapsed time = %.3lf (s)\n",
sqrt(shell_scrtol2), TinyDFT->shell_scr_time
);
printf(
"Screened unique shell pairs: %d out of %d (density = %.2lf%%)\n",
num_valid_sp, num_total_sp, 100.0 * (double) num_valid_sp / (double) num_total_sp
);
}
|
oracle_fmt_plug.c | /*
* This software is Copyright (c) 2004 bartavelle, <simon at banquise.net>, and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* UTF-8 support: Copyright magnum 2012 and hereby released to the general
* public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, is permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_oracle;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oracle);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "oracle"
#define FORMAT_NAME "Oracle 10"
#define FORMAT_TAG "O$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 120 // worst case UTF-8 is 40 characters of Unicode, that'll do
#define BINARY_SIZE 8
#define BINARY_ALIGN 4
#define MAX_USERNAME_LEN 30
#define SALT_SIZE (MAX_USERNAME_LEN*2 + 4) // also contains the NULL
#define SALT_ALIGN 2
#define CIPHERTEXT_LENGTH 16
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
//#define DEBUG_ORACLE
static struct fmt_tests tests[] = {
{"O$SYSTEM#9EEDFA0AD26C6D52", "THALES" },
{"O$SIMON#4F8BC1809CB2AF77", "A"},
{"O$SIMON#183D72325548EF11", "THALES2" },
{"O$SIMON#C4EB3152E17F24A4", "TST" },
{"O$BOB#b02c8e79ed2e7f46", "LAPIN" },
{"O$BOB#6bb4e95898c88011", "LAPINE" },
{"O$BOB#cdc6b483874b875b", "GLOUGLOU" },
{"O$BOB#ef1f9139db2d5279", "GLOUGLOUTER" },
{"O$BOB#c0ee5107c9a080c1", "AZERTYUIOP" },
{"O$BOB#99e8b231d33772f9", "CANARDWC" },
{"O$BOB#da3224126a67c8ed", "COUCOU_COUCOU" },
{"O$bob#ec8147abb3373d53", "LONG_MOT_DE_PASSE_OUI" },
{"9EEDFA0AD26C6D52", "THALES", {"SYSTEM"} },
{"4F8BC1809CB2AF77", "A", {"SIMON"} },
{"183D72325548EF11", "THALES2", {"SIMON"} },
{"C4EB3152E17F24A4", "TST", {"SIMON"} },
{"b02c8e79ed2e7f46", "LAPIN", {"BOB"} },
{"6bb4e95898c88011", "LAPINE", {"BOB"} },
{"cdc6b483874b875b", "GLOUGLOU", {"bob"} }, // put some low case in there, to make SURE the up case conversion works.
{"ef1f9139db2d5279", "GLOUGLOUTER", {"bob"} }, // also these 2 make sure lower cased passwords 'match' the 'get_key' method in the format tests.
{"c0ee5107c9a080c1", "AZERTYUIOP", {"BOB"} },
{"99e8b231d33772f9", "CANARDWC", {"BOB"} },
{"da3224126a67c8ed", "COUCOU_COUCOU", {"BOB"} },
{"ec8147abb3373d53", "LONG_MOT_DE_PASSE_OUI", {"BOB"} },
{NULL}
};
#if ARCH_LITTLE_ENDIAN
#define ENDIAN_SHIFT_L << 8
#define ENDIAN_SHIFT_R >> 8
#else
#define ENDIAN_SHIFT_L
#define ENDIAN_SHIFT_R
#endif
static UTF16 cur_salt[SALT_SIZE / 2 + PLAINTEXT_LENGTH];
static UTF16 (*cur_key)[PLAINTEXT_LENGTH + 1];
static char (*plain_key)[PLAINTEXT_LENGTH + 1];
static int (*key_length);
static ARCH_WORD_32 (*crypt_key)[2];
static DES_key_schedule desschedule_static;
static int salt_length;
static int valid(char *ciphertext, struct fmt_main *self)
{
int i;
int l;
/*
* 2 cases
* 1 - it comes from the disk, and does not have O$ + salt
* 2 - it comes from memory, and has got O$ + salt + # + blah
*/
if (strlen(ciphertext) > CIPHERTEXT_LENGTH + 3 +
MAX_USERNAME_LEN * (options.input_enc == UTF_8 ? 3 : 1))
return 0;
if (!memcmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
{
int len;
char name[MAX_USERNAME_LEN + 1];
UTF16 name16[MAX_USERNAME_LEN + 1 + 1];
ciphertext += FORMAT_TAG_LEN;
l = strlen(ciphertext) - CIPHERTEXT_LENGTH;
if (l <= 0)
return 0;
if (ciphertext[l-1] != '#')
return 0;
strnzcpy(name, ciphertext, sizeof(name));
len = enc_to_utf16(name16, MAX_USERNAME_LEN + 1,
(UTF8*)name, strlen(name));
if (len < 0) {
static int error_shown = 0;
#ifdef HAVE_FUZZ
if (options.flags & (FLG_FUZZ_CHK | FLG_FUZZ_DUMP_CHK))
return 0;
#endif
if (!error_shown)
fprintf(stderr, "%s: Input file is not UTF-8. Please use --input-enc to specify a codepage.\n", self->params.label);
error_shown = 1;
return 0;
}
if (len > MAX_USERNAME_LEN)
return 0;
}
else
{
if(strlen(ciphertext)!=CIPHERTEXT_LENGTH)
return 0;
l = 0;
}
for (i = l; i < l + CIPHERTEXT_LENGTH; i++){
if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f'))
|| (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
return 0;
}
return 1;
}
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
char *cp;
if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
return split_fields[1];
if (!split_fields[0])
return split_fields[1];
cp = mem_alloc(strlen(split_fields[0]) + strlen(split_fields[1]) + 4);
sprintf (cp, "%s%s#%s", FORMAT_TAG, split_fields[0], split_fields[1]);
if (valid(cp, self))
{
UTF8 tmp8[MAX_USERNAME_LEN * 3 + 1];
int utf8len;
// we no longer need this. It was just used for valid(). We will recompute
// all lengths after we do an upcase, since upcasing can change the length
// of the UTF-8 string.
MEM_FREE(cp);
// Upcase user name, --encoding aware
utf8len = enc_uc(tmp8, sizeof(tmp8), (unsigned char*)split_fields[0], strlen(split_fields[0]));
cp = mem_alloc_tiny(utf8len + strlen(split_fields[1]) + 4, MEM_ALIGN_NONE);
sprintf (cp, "%s%s#%s", FORMAT_TAG, tmp8, split_fields[1]);
#ifdef DEBUG_ORACLE
printf ("tmp8 : %s\n", tmp8);
#endif
return cp;
}
MEM_FREE(cp);
return split_fields[1];
}
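// split() canonicalizes a hash line: everything after the "O$" tag is upcased
// (encoding-aware), then the hex digest after the last '#' is lowercased,
// matching the FMT_SPLIT_UNIFIES_CASE flag set below.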
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[FORMAT_TAG_LEN + sizeof(cur_salt) + 1 + CIPHERTEXT_LENGTH];
char *cp;
strnzcpy(out, ciphertext, sizeof(out));
enc_strupper(&out[FORMAT_TAG_LEN]);
cp = strrchr(out, '#');
if (cp)
strlwr(cp);
return out;
}
static void init(struct fmt_main *self)
{
DES_set_key((DES_cblock *)"\x01\x23\x45\x67\x89\xab\xcd\xef", &desschedule_static);
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
cur_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*cur_key));
plain_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*plain_key));
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
key_length = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*key_length));
}
static void done(void)
{
MEM_FREE(key_length);
MEM_FREE(crypt_key);
MEM_FREE(plain_key);
MEM_FREE(cur_key);
}
static void set_salt(void *salt) {
salt_length = ((unsigned short *)salt)[0];
memcpy(cur_salt, &((unsigned short *)salt)[1], salt_length);
}
static void oracle_set_key(char *key, int index) {
UTF16 cur_key_mixedcase[PLAINTEXT_LENGTH+1];
UTF16 *c;
strcpy(plain_key[index], key);
// Can't use enc_to_utf16_be() because we need to do utf16_uc later
key_length[index] = enc_to_utf16((UTF16 *)cur_key_mixedcase, PLAINTEXT_LENGTH, (unsigned char*)key, strlen(key));
if (key_length[index] < 0)
key_length[index] = strlen16(cur_key_mixedcase);
// We convert and uppercase in one shot
key_length[index] = utf16_uc((UTF16 *)cur_key[index], PLAINTEXT_LENGTH, cur_key_mixedcase, key_length[index]);
// We have no way to 'undo' here, since the expansion is due to single-to-multi expansion in the upcase,
// and we cannot 'fix' our password. We simply have to accept that this one will not be cracked, but protect ourselves.
if (key_length[index] < 0)
key_length[index] *= -1;
// Now byte-swap to UTF16-BE
c = cur_key[index];
while((*c = *c << 8 | *c >> 8))
c++;
key_length[index] *= sizeof(UTF16);
#ifdef DEBUG_ORACLE
dump_stuff_msg("cur_key ", (unsigned char*)&cur_key[index][0], key_length[index]);
#endif
}
static char *get_key(int index) {
static UTF8 UC_Key[PLAINTEXT_LENGTH*3*3+1];
// Calling this will ONLY upcase characters 'valid' in the code page. There are MANY
// code pages in which mssql WILL upcase the letter (in UCS-2) even though there is no upper-case
// value in the code page. Thus we MUST keep the lower-cased letter in this case.
enc_uc(UC_Key, sizeof(UC_Key), (UTF8*)plain_key[index], strlen(plain_key[index]));
return (char*)UC_Key;
}
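/*
 * Oracle 10 hashing as implemented in crypt_all() below: the salt (upcased
 * user name in UTF-16BE) is concatenated with the upcased UTF-16BE password,
 * the buffer is DES-CBC encrypted under the fixed key 0123456789ABCDEF, the
 * last cipher block is then used as the key for a second DES-CBC pass over
 * the same buffer, and the last block of that pass is the 8-byte hash.
 */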
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int idx = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (idx = 0; idx < count; idx++)
#endif
{
unsigned char buf[sizeof(cur_salt)];
unsigned char buf2[SALT_SIZE + PLAINTEXT_LENGTH*2];
DES_key_schedule sched_local;
unsigned int l;
l = salt_length + key_length[idx];
memcpy(buf2, cur_salt, salt_length);
memcpy(buf2 + salt_length, cur_key[idx], key_length[idx]);
#ifdef DEBUG_ORACLE
dump_stuff_msg("cur_salt ", buf2, salt_length+key_length[idx]);
#endif
crypt_key[idx][0] = 0;
crypt_key[idx][1] = 0;
DES_ncbc_encrypt(buf2, buf, l, &desschedule_static, (DES_cblock *) crypt_key[idx], DES_ENCRYPT);
DES_set_key((DES_cblock *)crypt_key[idx], &sched_local);
crypt_key[idx][0] = 0;
crypt_key[idx][1] = 0;
DES_ncbc_encrypt(buf2, buf, l, &sched_local, (DES_cblock *) crypt_key[idx], DES_ENCRYPT);
#ifdef DEBUG_ORACLE
dump_stuff_msg(" crypt_key ", (unsigned char*)&crypt_key[idx][0], 8);
#endif
}
return count;
}
static void * get_binary(char *ciphertext)
{
static unsigned char *out3;
int l;
int i;
if (!out3) out3 = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
l = strlen(ciphertext) - CIPHERTEXT_LENGTH;
for(i=0;i<BINARY_SIZE;i++)
{
out3[i] = atoi16[ARCH_INDEX(ciphertext[i*2+l])]*16
+ atoi16[ARCH_INDEX(ciphertext[i*2+l+1])];
}
return out3;
}
static void * get_salt(char * ciphertext)
{
static UTF16 *out;
UTF8 salt[SALT_SIZE + 1];
int l;
if (!out) out = mem_alloc_tiny(SALT_SIZE+2, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE+2);
ciphertext += FORMAT_TAG_LEN;
l = 0;
while( ciphertext[l] && (ciphertext[l]!='#') )
{
salt[l] = ciphertext[l];
l++;
if (l >= SALT_SIZE-2) break;
}
salt[l] = 0;
// Encoding-aware shift to upper-case
enc_strupper((char*)salt);
l = enc_to_utf16_be(&out[1], MAX_USERNAME_LEN, (UTF8 *)salt, l);
out[0] = (l<<1);
return out;
}
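// Salt blob layout produced by get_salt(): out[0] holds the byte length of
// the UTF-16BE user name stored from out[1] onward; set_salt() reads it back
// with the same convention.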
// Public domain hash function by DJ Bernstein (salt is a username)
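// ((hash << 5) + hash) == hash * 33; this is the XOR variant of djb2.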
static int salt_hash(void *salt)
{
UTF16 *s = ((UTF16*)salt) + 1;
unsigned int hash = 5381;
while (*s)
hash = ((hash << 5) + hash) ^ *s++;
return hash & (SALT_HASH_SIZE - 1);
}
static int get_hash_0(int idx) { return crypt_key[idx][0] & PH_MASK_0; }
static int get_hash_1(int idx) { return crypt_key[idx][0] & PH_MASK_1; }
static int get_hash_2(int idx) { return crypt_key[idx][0] & PH_MASK_2; }
static int get_hash_3(int idx) { return crypt_key[idx][0] & PH_MASK_3; }
static int get_hash_4(int idx) { return crypt_key[idx][0] & PH_MASK_4; }
static int get_hash_5(int idx) { return crypt_key[idx][0] & PH_MASK_5; }
static int get_hash_6(int idx) { return crypt_key[idx][0] & PH_MASK_6; }
static int cmp_all(void *binary, int count)
{
int i;
ARCH_WORD_32 b = *(ARCH_WORD_32*)binary;
for (i = 0; i < count; ++i)
if (b == *((ARCH_WORD_32*)(crypt_key[i])) )
return 1;
return 0;
}
static int cmp_one(void *binary, int idx)
{
return !memcmp(binary, crypt_key[idx], sizeof(crypt_key[idx]));
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_oracle = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
oracle_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
graph_khop_sampler_op.h | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <stdlib.h>
#include <numeric>
#include <random>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
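// A partial Fisher-Yates shuffle: each iteration swaps a uniformly random
// element from the remaining range to the front, so after num_samples steps
// the first num_samples entries form a uniform sample without replacement.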
template <class bidiiter>
void SampleUniqueNeighbors(bidiiter begin, bidiiter end, int num_samples) {
int left_num = std::distance(begin, end);
std::random_device rd;
std::mt19937 rng{rd()};
std::uniform_int_distribution<int> dice_distribution(
0, std::numeric_limits<int>::max());
for (int i = 0; i < num_samples; i++) {
bidiiter r = begin;
int random_step = dice_distribution(rng) % left_num;
std::advance(r, random_step);
std::swap(*begin, *r);
++begin;
--left_num;
}
}
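// The same partial shuffle, applied in lockstep to the neighbor list and its
// edge ids so that (src, eid) pairs stay aligned after sampling.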
template <class bidiiter>
void SampleUniqueNeighborsWithEids(bidiiter src_begin, bidiiter src_end,
bidiiter eid_begin, bidiiter eid_end,
int num_samples) {
int left_num = std::distance(src_begin, src_end);
std::random_device rd;
std::mt19937 rng{rd()};
std::uniform_int_distribution<int> dice_distribution(
0, std::numeric_limits<int>::max());
for (int i = 0; i < num_samples; i++) {
bidiiter r1 = src_begin, r2 = eid_begin;
int random_step = dice_distribution(rng) % left_num;
std::advance(r1, random_step);
std::advance(r2, random_step);
std::swap(*src_begin, *r1);
std::swap(*eid_begin, *r2);
++src_begin;
++eid_begin;
--left_num;
}
}
template <typename T>
void SampleNeighbors(const T* src, const T* dst_count, const T* src_eids,
std::vector<T>* inputs, std::vector<T>* outputs,
std::vector<T>* output_counts,
std::vector<T>* outputs_eids, int k, bool is_first_layer,
bool is_last_layer, bool return_eids) {
const size_t bs = inputs->size();
// Allocate the memory for the outputs
// Collect the neighbor sizes
std::vector<std::vector<T>> out_src_vec;
std::vector<std::vector<T>> out_eids_vec;
// `sample_cumsum_sizes` records each node's start and end positions in the
// sampled output.
std::vector<size_t> sample_cumsum_sizes(bs + 1);
size_t total_neighbors = 0;
// `total_neighbors` is the total size of the output after sampling
sample_cumsum_sizes[0] = total_neighbors;
for (size_t i = 0; i < bs; i++) {
T node = inputs->data()[i];
T begin = dst_count[node];
T end = dst_count[node + 1];
int cap = end - begin;
int sample_size = cap > k ? k : cap;
total_neighbors += sample_size;
sample_cumsum_sizes[i + 1] = total_neighbors;
std::vector<T> out_src;
out_src.resize(cap);
out_src_vec.emplace_back(out_src);
if (return_eids) {
std::vector<T> out_eids;
out_eids.resize(cap);
out_eids_vec.emplace_back(out_eids);
}
}
if (is_first_layer) {
PADDLE_ENFORCE_GT(total_neighbors, 0,
platform::errors::InvalidArgument(
"The input nodes `X` should have at "
"least one neighbors, but none of the "
"input nodes have neighbors."));
}
output_counts->resize(bs);
outputs->resize(total_neighbors);
if (return_eids) {
outputs_eids->resize(total_neighbors);
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
// Sample the neighbors in parallel
for (size_t i = 0; i < bs; i++) {
T node = inputs->data()[i];
T begin = dst_count[node];
T end = dst_count[node + 1];
int cap = end - begin;
if (k < cap) {
std::copy(src + begin, src + end, out_src_vec[i].begin());
if (return_eids) {
std::copy(src_eids + begin, src_eids + end, out_eids_vec[i].begin());
SampleUniqueNeighborsWithEids(
out_src_vec[i].begin(), out_src_vec[i].end(),
out_eids_vec[i].begin(), out_eids_vec[i].end(), k);
} else {
SampleUniqueNeighbors(out_src_vec[i].begin(), out_src_vec[i].end(), k);
}
*(output_counts->data() + i) = k;
} else {
std::copy(src + begin, src + end, out_src_vec[i].begin());
if (return_eids) {
std::copy(src_eids + begin, src_eids + end, out_eids_vec[i].begin());
}
*(output_counts->data() + i) = cap;
}
}
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
// Copy the results in parallel
for (size_t i = 0; i < bs; i++) {
int sample_size = sample_cumsum_sizes[i + 1] - sample_cumsum_sizes[i];
std::copy(out_src_vec[i].begin(), out_src_vec[i].begin() + sample_size,
outputs->data() + sample_cumsum_sizes[i]);
if (return_eids) {
std::copy(out_eids_vec[i].begin(), out_eids_vec[i].begin() + sample_size,
outputs_eids->data() + sample_cumsum_sizes[i]);
}
}
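// For every layer but the last, build the next frontier: sort and deduplicate
// the sampled neighbors, then drop the nodes already in `inputs` via
// std::set_difference (which requires both ranges to be sorted).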
if (!is_last_layer) {
std::sort(inputs->begin(), inputs->end());
std::vector<T> outputs_sort(outputs->size());
std::copy(outputs->begin(), outputs->end(), outputs_sort.begin());
std::sort(outputs_sort.begin(), outputs_sort.end());
auto outputs_sort_end =
std::unique(outputs_sort.begin(), outputs_sort.end());
outputs_sort.resize(std::distance(outputs_sort.begin(), outputs_sort_end));
std::vector<T> unique_outputs(outputs_sort.size());
auto unique_outputs_end = std::set_difference(
outputs_sort.begin(), outputs_sort.end(), inputs->begin(),
inputs->end(), unique_outputs.begin());
inputs->resize(std::distance(unique_outputs.begin(), unique_outputs_end));
std::copy(unique_outputs.begin(), unique_outputs_end, inputs->begin());
}
}
template <typename DeviceContext, typename T>
class GraphKhopSamplerOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
// 1. Get sample neighbors operators' inputs.
auto* src = ctx.Input<Tensor>("Row");
auto* dst_count = ctx.Input<Tensor>("Col_Ptr");
auto* vertices = ctx.Input<Tensor>("X");
std::vector<int> sample_sizes = ctx.Attr<std::vector<int>>("sample_sizes");
bool return_eids = ctx.Attr<bool>("return_eids");
const T* src_data = src->data<T>();
const T* dst_count_data = dst_count->data<T>();
const T* p_vertices = vertices->data<T>();
const size_t bs = vertices->dims()[0];
// 2. Get unique input nodes(X).
std::vector<T> inputs(bs);
std::copy(p_vertices, p_vertices + bs, inputs.begin());
auto unique_inputs_end = std::unique(inputs.begin(), inputs.end());
inputs.resize(std::distance(inputs.begin(), unique_inputs_end));
// 3. Sample neighbors. We should distinguish between the cases with and without "Eids".
std::vector<T> outputs;
std::vector<T> output_counts;
std::vector<T> outputs_eids;
std::vector<std::vector<T>> dst_vec;
dst_vec.emplace_back(inputs);
std::vector<std::vector<T>> outputs_vec;
std::vector<std::vector<T>> output_counts_vec;
std::vector<std::vector<T>> outputs_eids_vec;
const size_t num_layers = sample_sizes.size();
bool is_last_layer = false, is_first_layer = true;
if (return_eids) {
auto* src_eids = ctx.Input<Tensor>("Eids");
const T* src_eids_data = src_eids->data<T>();
for (size_t i = 0; i < num_layers; i++) {
if (i == num_layers - 1) {
is_last_layer = true;
}
if (inputs.size() == 0) {
break;
}
if (i > 0) {
dst_vec.emplace_back(inputs);
is_first_layer = false;
}
SampleNeighbors<T>(src_data, dst_count_data, src_eids_data, &inputs,
&outputs, &output_counts, &outputs_eids,
sample_sizes[i], is_first_layer, is_last_layer,
return_eids);
outputs_vec.emplace_back(outputs);
output_counts_vec.emplace_back(output_counts);
outputs_eids_vec.emplace_back(outputs_eids);
}
} else {
for (size_t i = 0; i < num_layers; i++) {
if (i == num_layers - 1) {
is_last_layer = true;
}
if (inputs.size() == 0) {
break;
}
if (i > 0) {
is_first_layer = false;
dst_vec.emplace_back(inputs);
}
SampleNeighbors<T>(src_data, dst_count_data, nullptr, &inputs, &outputs,
&output_counts, &outputs_eids, sample_sizes[i],
is_first_layer, is_last_layer, return_eids);
outputs_vec.emplace_back(outputs);
output_counts_vec.emplace_back(output_counts);
outputs_eids_vec.emplace_back(outputs_eids);
}
}
// 4. Concat intermediate sample results.
int64_t unique_dst_size = 0, src_size = 0;
for (size_t i = 0; i < num_layers; i++) {
unique_dst_size += dst_vec[i].size();
src_size += outputs_vec[i].size();
}
std::vector<T> unique_dst_merge(unique_dst_size);
std::vector<T> src_merge(src_size);
std::vector<T> dst_sample_counts_merge(unique_dst_size);
auto unique_dst_merge_ptr = unique_dst_merge.begin();
auto src_merge_ptr = src_merge.begin();
auto dst_sample_counts_merge_ptr = dst_sample_counts_merge.begin();
// TODO(daisiming): We may try to use std::move in the future.
for (size_t i = 0; i < num_layers; i++) {
if (i == 0) {
unique_dst_merge_ptr = std::copy(dst_vec[i].begin(), dst_vec[i].end(),
unique_dst_merge.begin());
src_merge_ptr = std::copy(outputs_vec[i].begin(), outputs_vec[i].end(),
src_merge.begin());
dst_sample_counts_merge_ptr =
std::copy(output_counts_vec[i].begin(), output_counts_vec[i].end(),
dst_sample_counts_merge.begin());
} else {
unique_dst_merge_ptr = std::copy(dst_vec[i].begin(), dst_vec[i].end(),
unique_dst_merge_ptr);
src_merge_ptr = std::copy(outputs_vec[i].begin(), outputs_vec[i].end(),
src_merge_ptr);
dst_sample_counts_merge_ptr =
std::copy(output_counts_vec[i].begin(), output_counts_vec[i].end(),
dst_sample_counts_merge_ptr);
}
}
// 5. Return eids results.
if (return_eids) {
std::vector<T> eids_merge(src_size);
auto eids_merge_ptr = eids_merge.begin();
for (size_t i = 0; i < num_layers; i++) {
if (i == 0) {
eids_merge_ptr =
std::copy(outputs_eids_vec[i].begin(), outputs_eids_vec[i].end(),
eids_merge.begin());
} else {
eids_merge_ptr = std::copy(outputs_eids_vec[i].begin(),
outputs_eids_vec[i].end(), eids_merge_ptr);
}
}
auto* out_eids = ctx.Output<Tensor>("Out_Eids");
out_eids->Resize({static_cast<int>(eids_merge.size())});
T* p_out_eids = out_eids->mutable_data<T>(ctx.GetPlace());
std::copy(eids_merge.begin(), eids_merge.end(), p_out_eids);
}
int64_t num_sample_edges = std::accumulate(
dst_sample_counts_merge.begin(), dst_sample_counts_merge.end(), 0);
PADDLE_ENFORCE_EQ(
src_merge.size(), num_sample_edges,
platform::errors::PreconditionNotMet(
"Number of sample edges dismatch, the sample kernel has error."));
// 6. Reindex edges.
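// For example, with unique_dst_merge = {7, 3} and dst_sample_counts_merge =
// {2, 1}, node_map becomes {7 -> 0, 3 -> 1} and dst_merge below becomes
// {0, 0, 1}.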
std::unordered_map<T, T> node_map;
std::vector<T> unique_nodes;
size_t reindex_id = 0;
for (size_t i = 0; i < unique_dst_merge.size(); i++) {
T node = unique_dst_merge[i];
unique_nodes.emplace_back(node);
node_map[node] = reindex_id++;
}
for (size_t i = 0; i < src_merge.size(); i++) {
T node = src_merge[i];
if (node_map.find(node) == node_map.end()) {
unique_nodes.emplace_back(node);
node_map[node] = reindex_id++;
}
src_merge[i] = node_map[node];
}
std::vector<T> dst_merge(src_merge.size());
size_t cnt = 0;
for (size_t i = 0; i < unique_dst_merge.size(); i++) {
for (T j = 0; j < dst_sample_counts_merge[i]; j++) {
T node = unique_dst_merge[i];
dst_merge[cnt++] = node_map[node];
}
}
// 7. Get Reindex_X for input nodes.
auto* reindex_x = ctx.Output<Tensor>("Reindex_X");
T* p_reindex_x = reindex_x->mutable_data<T>(ctx.GetPlace());
for (size_t i = 0; i < bs; i++) {
p_reindex_x[i] = node_map[p_vertices[i]];
}
// 8. Get operator's outputs.
auto* sample_index = ctx.Output<Tensor>("Sample_Index");
auto* out_src = ctx.Output<Tensor>("Out_Src");
auto* out_dst = ctx.Output<Tensor>("Out_Dst");
sample_index->Resize({static_cast<int>(unique_nodes.size())});
out_src->Resize({static_cast<int>(src_merge.size()), 1});
out_dst->Resize({static_cast<int>(src_merge.size()), 1});
T* p_sample_index = sample_index->mutable_data<T>(ctx.GetPlace());
T* p_out_src = out_src->mutable_data<T>(ctx.GetPlace());
T* p_out_dst = out_dst->mutable_data<T>(ctx.GetPlace());
std::copy(unique_nodes.begin(), unique_nodes.end(), p_sample_index);
std::copy(src_merge.begin(), src_merge.end(), p_out_src);
std::copy(dst_merge.begin(), dst_merge.end(), p_out_dst);
}
};
} // namespace operators
} // namespace paddle
|
sageInterface.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the compete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization; they later help undo it to support autoPar.
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record of the original declaration (1st in the pair) and the normalization-generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
void addDeclaration(SgDeclarationStatement* decl);
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
// tps : 28 Oct 2008 - support for finding the main interpretation
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression
class UniqueNameAttribute : public AstAttribute
{
private:
std::string name;
public:
UniqueNameAttribute(std::string n="") {name =n; };
void set_name (std::string n) {name = n;};
std::string get_name () {return name;};
};
// DQ (3/2/2009): Added support for collecting and merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif
#if 0
// DQ (8/3/2015): We expect that this is not used and is generating a warnings so we
// can best fix it by removing it.
struct hash_nodeptr
{
// CH (4/9/2010): Use boost::hash instead
//#ifndef _MSC_VER
#if 0
//rose_hash::hash<char*> hasher;
#endif
public:
size_t operator()(SgNode* node) const
{
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
return (size_t) hash_value(node);
#else
return (size_t) node;
#endif
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL);
ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file info where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global maps of names to nodes, nodes to names, and collisions to support the generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if a member function is a template member function,
or false if it is a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword. For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain spaces.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling, which is more closely tied to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! A better version of SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically.
//! Used to embed a struct declaration into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls is context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
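// Hypothetical usage sketch: SgExpression* e2 = SageInterface::deepCopy<SgExpression>(expr);
// the dynamic_cast yields NULL if the copied node cannot be cast back to NodeType.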
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
virtual ~StatementGenerator() {};
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
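// A minimal sketch of a concrete generator (hypothetical, assuming the
// SageBuilder API):
//   struct ConstantGenerator : StatementGenerator {
//     SgStatement* generate(SgExpression* where_to_write_answer) {
//       return SageBuilder::buildAssignStatement(where_to_write_answer,
//                 SageBuilder::buildIntVal(42)); // writes 42 into the given variable
//     }
//   };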
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator? (e.g. X operator++(int)).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when outputting the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! \brief These functions traverse the memory pool of SgFile IR nodes and determine which languages are in use.
*/
ROSE_DLL_API bool is_Ada_language ();
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_Cobol_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Jovial_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDefinition);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//! Check if node1 is a strict ancestor of node2 (a node is not considered its own ancestor).
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
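// Usage sketch for the scope-based overload above (assuming 'scope' is a valid
// SgScopeStatement*, e.g. obtained from getScope()): insert a system header
// after the existing #includes of the enclosing global scope:
//   insertHeader("stdio.h", PreprocessingInfo::after, true /*isSystemHeader*/, scope);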
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information from stmt_src to stmt_dst. If a source-relative position is specified, only preprocessing information at that position is moved to the specified target position; otherwise all preprocessing information is moved with its position information intact. The moved information is appended to the target node's existing preprocessing information list by default; it is prepended if usePrepend is set to true. Optionally, the relative position can be adjusted after the move using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
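// Typical cut-then-paste usage (a sketch; 'src' and 'dst' are hypothetical
// located nodes, and 'dst' may be chosen after the cut):
//   AttachedPreprocessingInfoType buf;
//   cutPreprocessingInfo(src, PreprocessingInfo::before, buf);
//   // ... transform the AST, pick a destination ...
//   pastePreprocessingInfo(dst, PreprocessingInfo::before, buf);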
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );
// DQ (7/20/2008): I am not clear where I should put this function; candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
* Add a preprocessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
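// Usage sketch (hypothetical 'stmt'): guardNode(stmt, "defined(_OPENMP)")
// surrounds the statement so it unparses roughly as:
//   #if defined(_OPENMP)
//   ... stmt ...
//   #endif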
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and uses the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for (const int * const * x).
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither is const int b[10]; nor const int & c = 10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgTypes as scalar types: char, short, int, long, void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generate mangled modifier type names (const, volatile, etc.) according to the Itanium C++ ABI, with an extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1*dim2*...; the element count is assumed to be 1 for int a[]. Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively finds the base type for multi-dimensional array types
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only checks one level of base type; no recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
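// A usage sketch honoring the ownership transfer documented above ('arrtype'
// is a hypothetical SgArrayType&); expressions not attached to the AST should
// be released by the caller, e.g. with deleteAST():
//   std::vector<SgExpression*> dims = get_C_array_dimensions(arrtype);
//   for (size_t i = 0; i < dims.size(); ++i)
//     deleteAST(dims[i]); // or attach dims[i] to the AST instead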
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
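// Usage sketch (hypothetical 'exp'); both output parameters are optional:
//   SgExpression* arrayName = NULL;
//   std::vector<SgExpression*>* subscripts = NULL;
//   if (isArrayReference(exp, &arrayName, &subscripts))
//   {
//     // arrayName is the name expression; *subscripts lists the index
//     // expressions of the access.
//   }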
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as a 'has shared' type for convenience here. It is indeed a private type in the strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModifierType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType pointing to SgArrayType. Also a typedef may cause a chain of nodes before reaching the actual SgModifierType with the UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mod_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Lookup a named type based on its name, searching bottom-up from a specified scope. Note that name collision is allowed in C (not C++) between a typedef and an enum/struct; only the first matched named type is returned in this case. A typedef is returned as-is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types:
//! they may differ in one SgTemplateType pointer but be identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//! Check if a SgInitializedName is used as a loop index within an AST subtree
//! This function uses a bottom-up traversal starting from subtree_root to find all enclosing loops and checks if ivar is used as an index for any of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognizes while-loops, for-loops, and do-while-loops
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as: one initialization statement, a test expression, and an increment expression; the loop index variable should be of an integer type. isInclusiveUpperBound is true when <= or >= is used in the loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
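// Usage sketch (hypothetical 'loop'); any output parameter may be NULL when
// the caller does not need it:
//   SgInitializedName* ivar = NULL;
//   SgExpression *lb = NULL, *ub = NULL, *step = NULL;
//   bool isInclusive = false;
//   if (isCanonicalForLoop(loop, &ivar, &lb, &ub, &step, NULL, NULL, &isInclusive))
//   {
//     // canonical: for (ivar = lb; ivar <|<= ub; ivar += step) body
//   }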
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header, regardless of the condition expression type. for (i=lb; i op ub, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride (step) of a loop's increment expression, regardless of the expression type (i+=s; i=i+s, etc.)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be folded by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permute an n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0, depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
#if 0
printf ("Top of SageInterface::querySubTree() \n");
#endif
Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant);
std::vector<NodeType*> result(nodes.size(), NULL);
int count = 0;
#if 0
printf ("In SageInterface::querySubTree(): before initialization loop \n");
#endif
for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count)
{
#if 0
printf ("In SageInterface::querySubTree(): in loop: count = %d \n",count);
#endif
NodeType* node = dynamic_cast<NodeType*>(*i);
ROSE_ASSERT (node);
result[count] = node;
}
#if 0
printf ("Leaving SageInterface::querySubTree(): after initialization loop \n");
#endif
return result;
}
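// Usage sketch: collect every for statement below a hypothetical 'root' node;
// the variant defaults to NodeType::static_variant, so it rarely needs to be
// passed explicitly:
//   std::vector<SgForStatement*> loops = querySubTree<SgForStatement>(root);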
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
// This function uses a memory pool traversal specific to the NodeType IR nodes
class MyTraversal : public ROSE_VisitTraversal
{
public:
std::vector<NodeType*> resultlist;
void visit ( SgNode* node)
{
NodeType* result = dynamic_cast<NodeType* > (node);
ROSE_ASSERT(result!= NULL);
if (result!= NULL)
{
resultlist.push_back(result);
}
};
virtual ~MyTraversal() {}
};
MyTraversal my_traversal;
NodeType::traverseMemoryPoolNodes(my_traversal);
return my_traversal.resultlist;
}
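// Usage sketch: grab every SgFunctionDeclaration currently allocated in the
// memory pool, independent of its position in (or absence from) any AST:
//   std::vector<SgFunctionDeclaration*> decls =
//       getSgNodeListFromMemoryPool<SgFunctionDeclaration>();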
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! Loops or switch statements define their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Top-down traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
bool found = false;
#if 0
printf ("In findDeclarationStatement(): root = %p \n",root);
printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
printf ("In findDeclarationStatement(): scope = %p \n",scope);
printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif
// Do we really want a NULL pointer to be acceptable input to this function?
// Maybe we should have an assertion that it is non-null?
if (!root) return NULL;
T* decl = dynamic_cast<T*>(root);
#if 0
printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif
if (decl != NULL)
{
if (scope)
{
if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
{
found = true;
}
}
else // Liao 2/9/2010. We should allow NULL scope
{
#if 0
// DQ (12/6/2016): Include this into the debugging code to avoid compiler warning about unused variable.
SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
{
found = true;
}
}
}
if (found)
{
if (isDefining)
{
#if 0
printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
return dynamic_cast<T*> (decl->get_definingDeclaration());
}
else
{
#if 0
printf ("In findDeclarationStatement(): returing decl = %p \n",decl);
#endif
return decl;
}
}
std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif
// DQ (4/10/2016): Note that if we are searching for a function member that has its defining
// declaration defined outside of the class then it will not be found in the child list.
for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
{
T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
if (target)
{
return target;
}
}
return NULL;
}
//! Top-down traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
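// Usage sketch (hypothetical 'project'): find the defining declaration of a
// function named "initialize" in any scope below the project node:
//   SgFunctionDeclaration* def =
//       findFunctionDeclaration(project, "initialize", NULL, true);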
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different from the first non-defining declaration.
*
* If no ancestor of the requisite type or subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
if (NULL == astNode)
{
return NULL;
}
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
}
#if 0
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
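// Usage sketch (hypothetical 'exp'): find the function definition enclosing an
// expression; with includingSelf=true the starting node itself is considered:
//   SgFunctionDefinition* fdef =
//       getEnclosingNode<SgFunctionDefinition>(exp, true /*includingSelf*/);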
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node.
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
//! Get the closest class declaration enclosing the specified AST node.
ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode );
// DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters.
//! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions).
ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false);
// DQ (2/7/2019): Need a function to return when an expression is in an expression subtree.
// This is part of index evaluation for expressions in function argument lists, but likely useful elsewhere as well.
ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp);
// DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp.
ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp );
// DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes.
// DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions.
ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp);
ROSE_DLL_API bool isAddressTaken(SgExpression* refExp);
// DQ (2/17/2019): Adding support for detecting when to use added name qualification for member function references.
ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp);
// DQ (2/15/2019): Adding support for detecting which class a member reference is being made from.
// ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp);
ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp);
// DQ (2/17/2019): Display the shared nodes in the AST for debugging.
ROSE_DLL_API void outputSharedNodes( SgNode* node );
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaration( const string& varname);
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skips compiler-generated statements by default. Transformation-generated statements are counted, excluding those which are not to be output by the unparser.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
// DQ (11/15/2018): Adding support for traversals over the include file tree.
//! return path prefix for subtree of include files.
void listHeaderFiles ( SgIncludeFile* includeFile );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same function. Two function declarations are the same when they a) are identical, b) have the same name in C, or c) have the same qualified name and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of the same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its enclosing scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression(), etc. are not enough to handle the side effects on parent pointers, symbol tables, preprocessing info, defining/nondefining pointers, etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression trees containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple children statement list
//! so that inserting additional statements under the scope is straightforward and unambiguous.
//! For example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//! target's scope.
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement; move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
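// A small sketch of the insert family (assumption: 'target' is a statement already
// attached to the AST and 'newStmt' a freshly built statement, e.g. from SageBuilder):
#if 0
SgStatement* target  = /* an existing statement */ NULL;
SgStatement* newStmt = /* a newly built statement */ NULL;
// Either form relocates attached comments/CPP directives automatically by default.
SageInterface::insertStatementBefore(target, newStmt);
// ... or: SageInterface::insertStatementAfter(target, newStmt);
#endif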
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//! then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//! then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope);
// DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h.
ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope );
//! Remove a statement from its attachment point in the AST. Its associated preprocessing information is automatically kept at the original place after the removal. The statement is still in memory, and it is up to the user to decide whether the removed statement will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
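// Removing a statement detaches it but does not free it; a sketch of the typical pairing:
#if 0
SgStatement* stmt = /* statement to drop */ NULL;
SageInterface::removeStatement(stmt); // preprocessing info stays at the original place
SageInterface::deleteAST(stmt);       // release the memory once the node is no longer needed
#endif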
//! Deep-delete an AST subtree. It uses a postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols, or types that result. This is identical to deleteAST().
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope with references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
// DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing.
//! If header file unparsing and token-based unparsing are used, then some statements in header files
//! used with the same name and different include syntax can't be transformed. This is currently because
//! there is no way to generally test the resulting transformed code generated by ROSE.
ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt);
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
 * @return declaration of the temporary variable, and a variable reference expression to use instead of
 * the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
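// A sketch of lifting an expression into a temporary (assumption: getScope() and
// getEnclosingStatement() from this interface behave as usual; 'exp' is hypothetical):
#if 0
SgExpression* exp = /* expression to lift out */ NULL;
std::pair<SgVariableDeclaration*, SgExpression*> tmp =
    SageInterface::createTempVariableForExpression(exp, SageInterface::getScope(exp),
        true /* initializeInDeclaration */);
SageInterface::insertStatementBefore(SageInterface::getEnclosingStatement(exp), tmp.first);
SageInterface::replaceExpression(exp, tmp.second);
#endif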
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
   try to be smart about creating pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently setting parent, scope, and symbols for arguments when possible
/*! We recommend building the SgFunctionParameterList before building a function declaration.
However, it is still allowed to append new arguments to existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//! Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
// TODO consider the difference between C++ and Fortran
// fix up the scope of arguments; no symbols for a nondefining function declaration's arguments
// DQ (11/25/2011): templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao, 2/5/2008: the constructor of SgFunctionDeclaration will automatically generate an SgFunctionParameterList, so be cautious when setting a new paralist!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
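// A sketch of the recommended order: build the parameter list first, then attach it
// (assumption: buildFunctionParameterList() and buildInitializedName() are the usual
// SageBuilder helpers; 'funcDecl' is a hypothetical SgFunctionDeclaration*):
#if 0
SgFunctionParameterList* params = SageBuilder::buildFunctionParameterList();
SageInterface::appendArg(params,
    SageBuilder::buildInitializedName("x", SageBuilder::buildIntType()));
// setParameterList() deletes any auto-generated list already owned by funcDecl.
SageInterface::setParameterList(funcDecl, params);
#endif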
//! Set the pragma of a pragma declaration, handling memory release for any preexisting pragma and setting the parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. The old expression can be deleted (the default) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
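// A sketch: swap a subexpression for a constant (the old expression is deleted by default;
// buildIntVal() is assumed to be the usual SageBuilder helper):
#if 0
SgExpression* oldExp = /* e.g. an SgVarRefExp in the AST */ NULL;
SageInterface::replaceExpression(oldExp, SageBuilder::buildIntVal(42));
// Pass keepOldExp=true to keep oldExp alive for reuse elsewhere.
#endif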
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set operands for expressions with a single operand, such as unary expressions. Handles file info, lvalue status, pointer downcasting, parent pointers, etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//! Set the left-hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//! Set the right-hand operand for binary expressions
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function for comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantiations for the GNU g++
// compiler, which does not permit name qualification to be used to express the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification, which is likewise not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support for fixing up internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
// DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree.
// We might have to make this specific to a file (only traversing the functions in that file).
ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
  \brief Mostly used internally when some AST pieces are built without knowing their target
  scope/parent, especially during bottom-up construction of the AST. The associated symbols,
  parent, and scope pointers cannot be set at construction time.
  A set of utility functions is provided to
  patch up the scope, parent, and symbol for them when the target scope/parent becomes known.
*/
//! Connect variable references to the right variable symbols when feasible; return the number of references fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
  is declared. buildVarRefExp() will use a fake initialized name and symbol as placeholders
  to get the work done. Users should call fixVariableReferences() when the AST is complete and all
  variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true);
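// Typical use after bottom-up construction (a sketch; 'project' is the usual SgProject*):
#if 0
int numFixed = SageInterface::fixVariableReferences(project);
printf ("patched up %d variable references \n", numFixed);
#endif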
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of the AST, though top-down construction is recommended in general.
In this case, we have to patch up the symbol table, scope, and parent information when the scope is known. This function is usually used internally within appendStatement() and insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc. when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc. when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc. when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix the symbol table for SgLabelStatement. Used internally when the label is built without knowing its target scope. Neither parameter may be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have an enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc.) for all kinds of statements. Should be used before attaching the statement into the AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
// DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified.
//! Use the set of IR nodes and set the isModified flag in each IR node to true.
ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet);
// DQ (10/23/2018): Report nodes that are marked as modified.
ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node);
// DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes.
ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n );
ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope );
ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope);
ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode );
ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only sets the defining and nondefining links of the newly introduced
 * function declaration inside a scope, but also updates the links of any other declarations
 * of the same function accordingly.
 * Assumption: The function has already been inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//! Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either a function, a scope, or a single-line statement. For accesses to members of aggregate data, we return the coarse-grain aggregate memory object by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
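// A sketch of querying the read/write sets of a statement ('loop' is hypothetical):
#if 0
std::set<SgInitializedName*> readVars, writeVars;
SageInterface::collectReadWriteVariables(loop, readVars, writeVars);
// A variable occurring in both sets (e.g. via i++) is both read and written.
#endif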
//! Collect read-only variables within a statement. The statement can be either a function, a scope, or a single-line statement. For accesses to members of aggregate data, we return the coarse-grain aggregate memory object by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//! Collect read-only variable symbols within a statement. The statement can be either a function, a scope, or a single-line statement. For accesses to members of aggregate data, we return the coarse-grain aggregate memory object by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including the &a expression and foo(a) when the callee is declared as type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including the &a expression and foo(a) when the callee is declared as type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//! Get liveIn and liveOut variables for a for loop from the liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant-fold an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Be advised that constant folding of floating-point computations may decrease their accuracy!
/*! This is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this on an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//! Instrument (add a statement, often a function call) into a function right before its return points; handles multiple return statements (with duplicated statement s) and return expressions with side effects. Returns the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return statements with complex expressions that have side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
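// A sketch: call a runtime shutdown routine before every return point of a function
// (assumption: buildFunctionCallStmt(), buildVoidType(), and buildExprListExp() are the
// usual SageBuilder helpers; 'funcDecl' and the callee name are hypothetical):
#if 0
SgStatement* shutdownCall = SageBuilder::buildFunctionCallStmt(
    "my_runtime_exit", SageBuilder::buildVoidType(),
    SageBuilder::buildExprListExp(), SageInterface::getGlobalScope(funcDecl));
int inserted = SageInterface::instrumentEndOfFunction(funcDecl, shutdownCall);
#endif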
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
 * e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge.
 * If successful, return true; otherwise return false (e.g. the variable declaration does not match or already has an initializer).
 * The original assignment statement is removed by default.
 * This function is a bit ambiguous about the merge direction and is to be phased out.
*/
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching variable assignment that follows it. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without an eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge
*/
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
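// A sketch of the split/merge pair: int i = 10; <-> int i; i = 10;
#if 0
SgVariableDeclaration* decl = /* declaration with an assign initializer */ NULL;
SgExprStatement* assign = SageInterface::splitVariableDeclaration(decl);
// ... transform around the split form, then optionally merge back:
SageInterface::mergeDeclarationAndAssignment(decl, assign);
#endif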
//! Replace an expression with a temporary variable and an assignment statement
/*!
  Add a new temporary variable to contain the value of 'from';
  change references to 'from' to use this new variable.
  Assumptions: 'from' is not within the test of a loop or 'if';
  we are not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of an 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of an 'if' statement is a SgBasicBlock; create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
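// A sketch: normalize a single-statement loop body into a block before inserting into it:
#if 0
SgForStatement* loop = /* ... */ NULL;
SgBasicBlock* body = SageInterface::ensureBasicBlockAsBodyOfFor(loop);
SageInterface::appendStatement(newStmt, body); // now unambiguous, even for a one-line body
#endif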
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalizations have been done so that we can perform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single-statement body into a basic block. Its parent is an if, while, catch, or upc_forall, etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations, which declare the types used in the statement. The returned vector of declaration statements is sorted according to their order of appearance in the original AST. Any reference to a class or template class from a namespace will be treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp) before another expression (anchor_exp) that may have side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp) after another expression (anchor_exp) that may have side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp), T1) ..., where T1 is a temp variable saving the possible side effects of anchor_exp. The top-level comma op expression is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
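// Source-level effect of the two helpers (a sketch): given an anchor 'a' with side
// effects and a new expression 'e',
//   insertBeforeUsingCommaOp(e, a) rewrites a as (e, a), and
//   insertAfterUsingCommaOp(e, a)  rewrites a as ((T1 = a, e), T1) with a temp T1.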
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name referring to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name()));
}
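// A sketch of the functor-based overload (the AppendSuffix functor is an assumption):
#if 0
struct AppendSuffix
{
    SgName operator()(const SgName& n) const { return SgName(n.getString() + "_impl"); }
};
std::pair<SgStatement*, SgInitializedName*> res =
    SageInterface::wrapFunction(*definingDecl, AppendSuffix());
#endif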
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verification/repair---------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
bool verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in the AST utility list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime);
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
// from transformationSupport.h; not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primitive types)
that would match the one associated with the overloaded operator for user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
//  * get/set a direct child/grandchild node or fields
//  * get/set a property flag value
//  * get a descendant child node using preorder searching
//  * get an ancestor node using bottom-up/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or are existing member functions enough?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should be moved to SgXXX as an inherent member function?
// access modifier
void setExtern (SgFunctionDeclaration*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generating a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalence using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The data structure is used as the return type for SageInterface::evaluateConstIntegerExpression(). Always check whether hasValue_ is true before accessing value_. */
struct const_int_expr_t {
size_t value_;
bool hasValue_;
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
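// A sketch of the required hasValue_ check ('dimExpr' is hypothetical):
#if 0
struct const_int_expr_t result = evaluateConstIntegerExpression(dimExpr);
if (result.hasValue_)
   printf ("array dimension = %zu \n", result.value_);
#endif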
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
   EDG normalizes some in-class template functions and member functions to be redefined outside of a class. This causes the associated template instantiations
   to be declared outside of the class and to be marked as compiler generated (since the compiler generated that form outside of the class declaration).
   ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
   for template instantiations that are part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
{
// DQ (9/1/2016): This function is called in the call graph generation to avoid filtering out EDG-normalized
// function template instantiations (which come from normalized template functions and member functions).
// Note that because of the EDG normalization the member function is moved outside of the class, and
// thus marked as compiler generated. However, the template instantiations are always marked as compiler
// generated (if not specializations), and so we want to include a template instantiation that is marked
// as compiler generated but is from a template declaration that satisfied a specific user-defined filter.
// The complexity of this detection is isolated here, but knowing that it must be called is more complex.
// This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
bool retval = false;
#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif
// Test for this to be a template instantiation (in which case it was marked as
// compiler generated, but we may want to allow it to be used in the call graph,
// if its template was defined in the current directory).
SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);
if (templateInstantiationFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());
SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
if (templateFunctionDeclaration != NULL)
{
retval = filter->operator()(templateFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
else
{
if (templateInstantiationMemberFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());
SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
if (templateMemberFunctionDeclaration != NULL)
{
retval = filter->operator()(templateMemberFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
}
return retval;
}
void detectCycleInType(SgType * type, const std::string & from);
}// end of namespace
#endif
|
GB_binop__max_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_uint64
// A.*B function (eWiseMult): GB_AemultB__max_uint64
// A*D function (colscale): GB_AxD__max_uint64
// D*A function (rowscale): GB_DxB__max_uint64
// C+=B function (dense accum): GB_Cdense_accumB__max_uint64
// C+=b function (dense accum): GB_Cdense_accumb__max_uint64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_uint64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_uint64
// C=scalar+B GB_bind1st__max_uint64
// C=scalar+B' GB_bind1st_tran__max_uint64
// C=A+scalar GB_bind2nd__max_uint64
// C=A'+scalar GB_bind2nd_tran__max_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
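// For illustration (assumption: GB_IMAX(x,y) selects the larger operand, as elsewhere
// in GraphBLAS), GB_BINOP (GB_CX (p), aij, bij, i, j) expands to:
//      Cx [p] = GB_IMAX (aij, bij) ;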
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT64 || GxB_NO_MAX_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__max_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__max_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__max_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__max_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__max_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__max_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
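// (The three slice workspaces declared below are allocated on demand inside
// the included template, which is why GB_FREE_ALL must release them on every
// exit path.)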
GrB_Info GB_AaddB__max_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__max_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__max_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
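// Bb is B's bitmap (NULL when B is full); GBB (Bb, p) reports whether
// entry p is present, so the loop below skips holes in bitmap matrices.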
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__max_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB_bind1st_tran__max_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__max_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
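/* These round toward -infinity and +infinity respectively, unlike C integer
 * division, which truncates toward zero; the difference matters for the
 * negative loop bounds produced by tiling. A few sanity values:
 *   floord(7,3) == 2,  floord(-7,3) == -3   (but -7/3 == -2 in C)
 *   ceild(7,3)  == 3,  ceild(-7,3)  == -2
 */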
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
/* Nx..Nt were previously left uninitialized when arguments were
missing; require all four sizes instead */
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 24;
tile_size[3] = 1024;
tile_size[4] = -1;
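// The trailing -1 is a sentinel; a consumer would walk the list as, e.g.:
//   for (int s = 0; tile_size[s] != -1; s++) { /* use tile_size[s] */ }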
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
// start at 0: the stencil also reads the boundary planes, so they must be initialized
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=0; i<Nz; i++) {   // start at 0 so boundary coefficients are defined
for (j=0; j<Ny; j++) {
for (k=0; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// stencil update below - per-point cost: 13 multiplications, 24 additions
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(12*t1+Ny+15,24)),floord(24*t2+Ny+11,24)),floord(24*t1-24*t2+Nz+Ny+13,24));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-126,128)),ceild(3*t1-254,256)),ceild(24*t2-Nz-1011,1024)),ceild(24*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(12*t1+Nx+15,1024)),floord(24*t2+Nx+11,1024)),floord(24*t3+Nx+11,1024)),floord(24*t1-24*t2+Nz+Nx+13,1024));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),6*t3+4),256*t4+254);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(1024*t4,4*t5+4);
ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
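/* A sketch of the logical (untiled) update computed by the nest above,
for t, i, j, k ranging over the interior as in the loop bounds:

A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[t%2][i][j][k]
+ sum over radius r = 1..4 of
coef[3*(r-1)+1][i][j][k] * (A[t%2][i-r][j][k] + A[t%2][i+r][j][k])
+ coef[3*(r-1)+2][i][j][k] * (A[t%2][i][j-r][k] + A[t%2][i][j+r][k])
+ coef[3*(r-1)+3][i][j][k] * (A[t%2][i][j][k-r] + A[t%2][i][j][k+r])
*/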
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
simpleTest.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <likwid-marker.h>
#define N 10000
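/* A typical (assumed) build-and-run recipe for the marker API:
   cc -fopenmp -DLIKWID_PERFMON simpleTest.c -llikwid -o simpleTest
   likwid-perfctr -C 0-3 -g FLOPS_DP -m ./simpleTest
   -m enables marker instrumentation, -g picks the event group, and -C pins
   the threads; exact flags and groups depend on the LIKWID installation. */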
int main(int argc, char* argv[])
{
int i;
double data[N];
LIKWID_MARKER_INIT;   /* the marker API requires INIT before any region */
#pragma omp parallel
{
LIKWID_MARKER_REGISTER("foo");
}
#pragma omp parallel
{
LIKWID_MARKER_START("foo");
#pragma omp for
for(i = 0; i < N; i++)
{
data[i] = omp_get_thread_num();
}
LIKWID_MARKER_STOP("foo");
}
LIKWID_MARKER_CLOSE;  /* write out the collected marker results */
return 0;
}
|
dSchCompUdt-2Ddynamic.c |
/*! @file
* \brief This file contains the main loop of pdgstrf, which performs the
* rank-k update of the Schur complement.
* Uses 2D partitioning for the scatter phase.
*
* <pre>
* -- Distributed SuperLU routine (version 4.1) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
* </pre>
*/
#define SCHEDULE_STRATEGY guided
double tt_start;
double tt_end;
if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
int cum_nrow=0;
int temp_nbrow;
lptr = lptr0;
luptr = luptr0;
/**
* separating L blocks
*/
int lookAheadBlk=0, RemainBlk=0;
tt_start = SuperLU_timer_();
for (int i = 0; i < nlb; ++i) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
int look_up_flag=1;
for (int j = k0+1; j < SUPERLU_MIN (k0 + num_look_aheads+2, nsupers ); ++j)
{
if(ib == perm_c_supno[j]) look_up_flag=0;
}
if(!look_up_flag) {
/* ib is within look up window */
if (lookAheadBlk==0) {
lookAheadFullRow[lookAheadBlk] = temp_nbrow;
} else {
lookAheadFullRow[lookAheadBlk] = temp_nbrow+lookAheadFullRow[lookAheadBlk-1];
}
lookAheadStRow[lookAheadBlk] = cum_nrow;
lookAhead_lptr[lookAheadBlk] = lptr;
lookAhead_ib[lookAheadBlk] = ib;
lookAheadBlk++;
} else { /* ib is not in look up window */
if (RemainBlk==0) {
Remain_info[RemainBlk].FullRow = temp_nbrow;
} else {
Remain_info[RemainBlk].FullRow = temp_nbrow+Remain_info[RemainBlk-1].FullRow;
}
RemainStRow[RemainBlk] = cum_nrow;
// Remain_lptr[RemainBlk] = lptr;
Remain_info[RemainBlk].lptr = lptr;
// Remain_ib[RemainBlk] = ib;
Remain_info[RemainBlk].ib = ib;
RemainBlk++;
}
cum_nrow +=temp_nbrow;
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
lptr += temp_nbrow;
luptr += temp_nbrow;
} /* for i ... */
lptr = lptr0;
luptr = luptr0;
/* leading dimension of L buffer */
#if 0
int LDlookAhead_LBuff = lookAheadFullRow[lookAheadBlk-1]; /* index goes to -1 when lookAheadBlk == 0 */
#else /* Piyush fix: guard the empty look-ahead case */
int LDlookAhead_LBuff = lookAheadBlk==0 ? 0 : lookAheadFullRow[lookAheadBlk-1];
#endif
/* #pragma omp parallel for */
for (int i = 0; i < lookAheadBlk; ++i) {
int StRowDest = 0;
int temp_nbrow;
if (i==0) {
temp_nbrow = lookAheadFullRow[0];
} else {
StRowDest = lookAheadFullRow[i-1];
temp_nbrow = lookAheadFullRow[i]-lookAheadFullRow[i-1];
}
int StRowSource=lookAheadStRow[i];
/* Now copying the matrix*/
// #pragma omp parallel for (gives slow down)
for (int j = 0; j < knsupc; ++j) {
memcpy(&lookAhead_L_buff[StRowDest+j*LDlookAhead_LBuff],
&lusup[luptr+j*nsupr+StRowSource],
temp_nbrow * sizeof(double) );
}
}
int LDRemain_LBuff = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < RemainBlk; ++i) {
int StRowDest = 0;
int temp_nbrow;
if (i==0) {
temp_nbrow = Remain_info[0].FullRow;
} else {
StRowDest = Remain_info[i-1].FullRow;
temp_nbrow = Remain_info[i].FullRow-Remain_info[i-1].FullRow;
}
int StRowSource=RemainStRow[i];
/* Now copying the matrix*/
// #pragma omp parallel for (gives slow down)
for (int j = 0; j < knsupc; ++j) {
// printf("StRowDest %d LDRemain_LBuff %d StRowSource %d \n", StRowDest ,LDRemain_LBuff ,StRowSource );
memcpy(&Remain_L_buff[StRowDest+j*LDRemain_LBuff],
&lusup[luptr+j*nsupr+StRowSource],
temp_nbrow * sizeof(double) );
}
} /* parallel for i ... */
tt_end = SuperLU_timer_();
LookAheadRowSepTimer += tt_end-tt_start;
#if 0
LookAheadRowSepMOP += 2*knsupc*(lookAheadFullRow[lookAheadBlk-1]+Remain_info[RemainBlk-1].FullRow );
#else
int_t lnbrow, rnbrow;
lnbrow = lookAheadBlk==0 ? 0 : lookAheadFullRow[lookAheadBlk-1];
rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
nbrow = lnbrow + rnbrow;
LookAheadRowSepMOP += 2*knsupc*(nbrow);
#endif
ldu =0;
full =1;
/*updating lookahead rows */
tt_start = SuperLU_timer_();
#if 0
nbrow = lookAheadFullRow[lookAheadBlk-1]+Remain_info[RemainBlk-1].FullRow;
#endif
if ( nbrow>0 ) {
/*
* counting U blocks
*/
ncols=0;
ldu=0;
full=1;
int temp_ncols=0;
for (j = jj0; j < nub; ++j) {
temp_ncols=0;
arrive_at_ublock(
j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid
);
Ublock_info[j].iukp = iukp;
Ublock_info[j].rukp = rukp;
Ublock_info[j].jb = jb;
/* Prepare to call GEMM. */
jj = iukp;
for (; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
++temp_ncols;
if ( segsize != ldu ) full = 0;
if ( segsize > ldu ) ldu = segsize;
}
}
Ublock_info[j].full_u_cols = temp_ncols;
ncols += temp_ncols;
}
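/* At this point ldu is the largest segment height seen (it becomes the
leading dimension of the packed U buffer bigU), full == 1 iff every
segment has exactly that height, and ncols counts the packed U columns. */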
/* Now doing prefix sum on full_u_cols */
for ( j = jj0+1; j < nub; ++j) {
Ublock_info[j].full_u_cols += Ublock_info[j-1].full_u_cols;
}
tempu = bigU;
#ifdef _OPENMP
#pragma omp parallel for private(j,iukp,rukp,tempu, jb, nsupc,ljb,segsize,\
lead_zero, jj, i) \
default (shared) schedule(SCHEDULE_STRATEGY)
#endif
for (j = jj0; j < nub; ++j) {
if(j==jj0) tempu = bigU;
else tempu = bigU + ldu*Ublock_info[j-1].full_u_cols;
/* == processing each of the remaining columns == */
arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid);
for (jj = iukp; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
lead_zero = ldu - segsize;
for (i = 0; i < lead_zero; ++i) tempu[i] = zero;
tempu += lead_zero;
for (i = 0; i < segsize; ++i) tempu[i] = uval[rukp+i];
rukp += segsize;
tempu += segsize;
}
}
rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
} /* parallel for j = jj0 .. nub-1 */
tempu = bigU; //setting it to starting of the matrix
} /* if(nbrow>0) */
tt_end = SuperLU_timer_();
GatherTimer += tt_end-tt_start;
GatherMOP += 2*ldu*ncols;
int Lnbrow = lookAheadBlk==0 ? 0 :lookAheadFullRow[lookAheadBlk-1];
int Rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
int jj_cpu=nub; /*limit between CPU and GPU */
tempv = bigV;
if (Lnbrow>0 && ldu >0 && ncols>0) {
ncols = Ublock_info[nub-1].full_u_cols;
schur_flop_counter += 2 * (double)Lnbrow * (double)ldu * (double)ncols;
stat->ops[FACT] += 2 * (double)Lnbrow * (double)ldu * (double)ncols;
tt_start = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel for default (shared) \
private (j,i,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \
schedule(dynamic)
#endif
for (int ij = 0; ij < lookAheadBlk*(nub-jj0); ++ij) {
int j = ij/lookAheadBlk + jj0;
int lb = ij%lookAheadBlk;
#ifdef _OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
double* tempv1 = bigV + thread_id*ldt*ldt;
/* Getting U block information */
/* unsigned long long ut_start, ut_end; */
int_t rukp = Ublock_info[j].rukp;
int_t iukp = Ublock_info[j].iukp;
int jb = Ublock_info[j].jb;
int nsupc = SuperSize(jb);
int ljb = LBj (jb, grid);
int st_col;
int ncols;
if (j>jj0) {
ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols;
st_col = Ublock_info[j-1].full_u_cols;
} else {
ncols = Ublock_info[j].full_u_cols;
st_col = 0;
}
/* Getting L block information */
int_t lptr = lookAhead_lptr[lb];
int ib = lookAhead_ib[lb];
int temp_nbrow = lsub[lptr+1];
lptr += LB_DESCRIPTOR;
int cum_nrow = (lb==0 ? 0 : lookAheadFullRow[lb-1]);
#if defined (USE_VENDOR_BLAS)
dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1);
#else
dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow);
#endif
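/* The call above has standard dgemm semantics,
tempv1 = alpha * L_block * U_block + beta * tempv1,
with m = temp_nbrow (rows of this L block), n = ncols (columns of this
U block) and k = ldu, so tempv1 holds a temp_nbrow-by-ncols dense
update block that is scattered into L or U below. */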
if ( ib < jb ) {
dscatter_u (
ib, jb,
nsupc, iukp,xsup,
klst, temp_nbrow,
lptr, temp_nbrow,lsub,
usub, tempv1,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else {
dscatter_l (
ib, ljb, nsupc,iukp,xsup,klst,temp_nbrow,lptr,
temp_nbrow,usub,lsub,tempv1,
indirect_thread, indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
}
} /* for ij = ... */
tt_end = SuperLU_timer_();
LookAheadGEMMTimer += tt_end- tt_start;
LookAheadGEMMFlOp += 2 * (double ) Lnbrow * (double )ldu * (double )ncols;
stat->ops[FACT] += 2 * (double ) Lnbrow * (double )ldu * (double )ncols;
LookAheadScatterTimer += tt_end-tt_start;
LookAheadScatterMOP += 3*Lnbrow*ncols;
} /* if (Lnbrow > 0 && ldu > 0 && ncols > 0) */
/***************************************************************
* Updating remaining rows and column on CPU
***************************************************************/
Rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
ncols = jj_cpu==0 ? 0 : Ublock_info[jj_cpu-1].full_u_cols;
schur_flop_counter += 2 * (double)Rnbrow * (double)ldu * (double)ncols;
stat->ops[FACT] += 2 * (double)Rnbrow * (double)ldu * (double)ncols;
tt_start = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel for default (shared) \
private (j,i,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \
schedule(dynamic)
#endif
for (int ij = 0; ij < RemainBlk*(jj_cpu-jj0); ++ij) {
int j = ij / RemainBlk + jj0;
int lb = ij % RemainBlk;
#ifdef _OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
double* tempv1 = bigV + thread_id*ldt*ldt;
/* Getting U block information */
/* unsigned long long ut_start, ut_end; */
int_t rukp = Ublock_info[j].rukp;
int_t iukp = Ublock_info[j].iukp;
int jb = Ublock_info[j].jb;
int nsupc = SuperSize(jb);
int ljb = LBj (jb, grid);
int st_col;
int ncols;
if (j>jj0) {
ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols;
st_col = Ublock_info[j-1].full_u_cols;
} else {
ncols = Ublock_info[j].full_u_cols;
st_col = 0;
}
/* Getting L block information */
int_t lptr = Remain_info[lb].lptr;
int ib = Remain_info[lb].ib;
int temp_nbrow = lsub[lptr+1];
lptr += LB_DESCRIPTOR;
int cum_nrow = (lb==0 ? 0 : Remain_info[lb-1].FullRow);
/* calling GEMM */
#if defined (USE_VENDOR_BLAS)
dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&Remain_L_buff[(knsupc-ldu)*Rnbrow+cum_nrow], &Rnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1);
#else
dgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&Remain_L_buff[(knsupc-ldu)*Rnbrow+cum_nrow], &Rnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow);
#endif
/* Now scattering the block */
if ( ib<jb ) {
dscatter_u (
ib, jb,
nsupc, iukp,xsup,
klst, temp_nbrow,
lptr, temp_nbrow,lsub,
usub, tempv1,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else {
dscatter_l (
ib, ljb, nsupc,iukp,xsup,klst,temp_nbrow,lptr,
temp_nbrow,usub,lsub,tempv1,
indirect_thread, indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
}
} /* for (int ij =... */
} /* if k L(:,k) and U(k,:) are not empty */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc < 5) {
/* Nx..Nt were previously left uninitialized when arguments were
missing; require all four sizes instead */
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
// start at 0: the stencil also reads the boundary planes, so they must be initialized
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=0; i<Nz; i++) {   // start at 0 so boundary coefficients are defined
for (j=0; j<Ny; j++) {
for (k=0; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// stencil update below - per-point cost: 7 multiplications, 6 additions
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
txinvr.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
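// Each interior grid point applies an independent 5x5 block (built from the
// local flow quantities below) to the 5-component rhs vector, which is why
// the k-loop parallelizes with no communication between iterations.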
void txinvr()
{
int i, j, k;
double t1, t2, t3, ac, ru1, uu, vv, ww, r1, r2, r3, r4, r5, ac2inv;
//kai
int k12 = 0; /* restored: k12 is used in the k-loop bound below */
// consistent_data(&k12, "int", 1);
if (timeron) timer_start(t_txinvr);
#pragma omp parallel for default(shared) \
private(i,j,k,t1,t2,t3,ac,ru1,uu,vv,ww,r1,r2,r3,r4,r5,ac2inv)
for (k = k12+1; k <= nz2; k++) {
for (j = 1; j <= ny2; j++) {
for (i = 1; i <= nx2; i++) {
ru1 = rho_i[k][j][i];
uu = us[k][j][i];
vv = vs[k][j][i];
ww = ws[k][j][i];
ac = speed[k][j][i];
ac2inv = ac*ac;
r1 = rhs[k][j][i][0];
r2 = rhs[k][j][i][1];
r3 = rhs[k][j][i][2];
r4 = rhs[k][j][i][3];
r5 = rhs[k][j][i][4];
t1 = c2 / ac2inv * ( qs[k][j][i]*r1 - uu*r2 - vv*r3 - ww*r4 + r5 );
t2 = bt * ru1 * ( uu * r1 - r2 );
t3 = ( bt * ru1 * ac ) * t1;
rhs[k][j][i][0] = r1 - t1;
rhs[k][j][i][1] = - ru1 * ( ww*r1 - r4 );
rhs[k][j][i][2] = ru1 * ( vv*r1 - r3 );
rhs[k][j][i][3] = - t2 + t3;
rhs[k][j][i][4] = t2 + t3;
}
}
//kai: reset so later calls start from k = 1 (note: a racy write to a
// shared variable inside the parallel loop, though every thread stores
// the same value)
k12 = 0;
}
if (timeron) timer_stop(t_txinvr);
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
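/* The FOURCC values are the ASCII tags packed little-endian: "DXT1" is
   'D'=0x44, 'X'=0x58, 'T'=0x54, '1'=0x31, read as the 32-bit word
   0x31545844. */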
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
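/* Presumably, as in the squish library these tables derive from: each table
   maps an 8-bit target value to the best 5- or 6-bit endpoint pair, where
   start/end are the endpoint values and error is the residual, and the two
   sources[] entries correspond to the two block interpolation modes. */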
typedef MagickBooleanType
DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if ((ssize_t) max - min < steps) \
    max = MagickMin(min + steps, 255); \
  if ((ssize_t) max - min < steps) \
    min = MagickMax(0, (ssize_t) max - steps); \
} while(0)
#define Dot(left, right) ((left).x*(right).x + (left).y*(right).y + \
  (left).z*(right).z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
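/*
  Worked example (illustrative only): expanding a 5- or 6-bit field
  replicates its high bits into the low bits so that the maximum field
  value maps to exactly 255.  For c = 0xF800, C565_r(c) = 31 and
  C565_red(c) = (31 << 3) | (31 >> 2) = 248 | 7 = 255.  The hypothetical
  helper below simply wraps the macro above and is not used by the coder.
*/
static inline unsigned char Example565Red(const unsigned short c)
{
  return((unsigned char) C565_red(c));
}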
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *),
WriteMipmaps(Image *,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
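/*
  Worked example: the comparison c0 > c1 selects the DXT1 palette mode.
  In four-color mode the derived entries are (2*e0+e1)/3 and (e0+2*e1)/3
  per channel, e.g. r[0]=255, r[1]=0 gives r[2]=510/3=170 and
  r[3]=255/3=85.  In three-color mode the single derived entry is
  (e0+e1)/2 and index 3 is marked transparent via a[3]=255, which the
  DXT1 reader feeds to the opacity channel.
*/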
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
size_t
dist;
dist = value - (size_t)codes[j];
dist *= dist;
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
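/*
  Example: with min=32, max=160 and steps=5 the interpolated entries are
  codes[i+1] = ((5-i)*32 + i*160)/5, i.e. 57, 83, 108 and 134, with
  codes[6]=0 and codes[7]=255 completing the 8-entry DXT5 alpha codebook
  for the a0 <= a1 mode.  The unsigned subtraction above is safe: the
  wrapped difference squares to the true squared distance modulo 2^64,
  and the true square never exceeds 255*255.
*/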
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
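/*
  The cluster fit above follows the least-squares scheme popularized by
  the squish library: for every split (besti,bestj,bestk) of the
  projection-ordered points into the four palette clusters it minimizes
    sum_p w_p * ||alpha_p*a + beta_p*b - x_p||^2
  with (alpha,beta) in {(1,0), (2/3,1/3), (1/3,2/3), (0,1)}, solving the
  2x2 normal equations for the endpoints a and b in closed form before
  snapping them to the RGB565 grid.
*/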
static void CompressRangeFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
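/*
  Range fit is the cheap fallback: project every point onto the principal
  axis, take the extreme projections as endpoints, snap them to the
  RGB565 grid, and assign each pixel to the nearest of the four palette
  entries under the perceptual metric.
*/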
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
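/*
  This is eight rounds of power iteration on the symmetric 3x3 covariance
  matrix: v <- C*v, rescaled by its largest component each round, which
  drives v toward the dominant eigenvector (the principal axis of the
  color cloud).  Eight rounds are a fixed budget, not a convergence test.
*/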
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
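/*
  The six floats pack the symmetric covariance matrix in upper-triangular
  order, covariance[0..5] = { Cxx, Cxy, Cxz, Cyy, Cyz, Czz }, matching
  the row0/row1/row2 unpacking in ComputePrincipleComponent above.
*/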
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status,
cubemap = MagickFalse,
volume = MagickFalse,
matte;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
matte = MagickTrue;
decoder = ReadUncompressedRGBA;
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Not sure how to handle this */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
matte = MagickFalse;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
matte = MagickTrue;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
matte = MagickTrue;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
if ((num_images == 0) || (num_images > GetBlobSize(image)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
/* Start a new image */
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->matte = matte;
image->compression = compression;
image->columns = dds_info.width;
image->rows = dds_info.height;
image->storage_class = DirectClass;
image->endian = LSBEndian;
image->depth = 8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
(void) SetImageBackgroundColor(image);
if ((decoder)(image, &dds_info, exception) != MagickTrue)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
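/*
  For reference, the byte offsets consumed above follow the standard
  124-byte DDS_HEADER that trails the 4-byte magic (all values are
  little-endian DWORDs):
    0   magic "DDS "          76  pixelformat.size (32)
    4   header size (124)     80  pixelformat.flags
    8   flags                 84  pixelformat.fourcc
    12  height                88  pixelformat.rgb_bitcount
    16  width                 92..107 r/g/b/alpha bitmasks
    20  pitchOrLinearSize     108 ddscaps1
    24  depth                 112 ddscaps2
    28  mipmapcount           116..127 caps3/caps4/reserved
    32..75  reserved (11 DWORDs)
  which is why ReadDDSImage seeks to offset 128 before decoding.
*/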
static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
PixelPacket
*q;
register ssize_t
i,
x;
size_t
bits;
ssize_t
j,
y;
unsigned char
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) image->rows; y += 4)
{
for (x = 0; x < (ssize_t) image->columns; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
MagickMin(4,image->rows-y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickFalse);
if (EOFBlob(image) != MagickFalse)
break;
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if (((x + i) < (ssize_t) image->columns) &&
((y + j) < (ssize_t) image->rows))
{
code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
if ((colors.a[code] != 0) && (image->matte == MagickFalse))
image->matte=MagickTrue; /* Correct matte */
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
if (EOFBlob(image) != MagickFalse)
break;
}
return(SkipDXTMipmaps(image,dds_info,8,exception));
}
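/*
  Each DXT1 block is 8 bytes: two RGB565 endpoints c0 and c1 followed by
  a 32-bit field of 16 two-bit palette indices.  Pixel (i,j) of the 4x4
  tile occupies bits (j*4+i)*2 and (j*4+i)*2+1, so e.g. the index of
  pixel (1,0) is (bits >> 2) & 0x3.
*/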
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
if (EOFBlob(image) != MagickFalse)
break;
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
if (EOFBlob(image) != MagickFalse)
break;
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
MagickSizeType
alpha_bits;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
if (EOFBlob(image) != MagickFalse)
break;
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/* Extract alpha value */
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
if (EOFBlob(image) != MagickFalse)
break;
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
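/*
  Each DXT5 block is 16 bytes: alpha endpoints a0 and a1, 48 bits of
  3-bit alpha indices, then a DXT1-style color block.  With a0 > a1 the
  decoder interpolates six intermediate alphas,
    alpha = ((8-c)*a0 + (c-1)*a1)/7 for codes c = 2..7;
  otherwise it interpolates four,
    alpha = ((6-c)*a0 + (c-1)*a1)/5 for c = 2..5,
  with codes 6 and 7 pinned to 0 and 255.
*/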
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
x, y;
unsigned short
color;
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image)));
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
SetPixelAlpha(q,QuantumRange);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,3,exception));
}
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleMatteType);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else if (alphaBits == 2)
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(color >> 8)));
SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
}
else
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)));
}
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = SetMagickInfo("DDS");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT1");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT5");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
int texel_size,ExceptionInfo *exception)
{
register ssize_t
i;
MagickOffsetType
offset;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
if ((w == 1) && (h == 1))
break;
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
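/*
  Example: a 256x256 DXT1 texture stores each skipped mipmap level as
  ((w+3)/4)*((h+3)/4)*8 bytes, so the 128x128 level occupies
  32*32*8 = 8192 bytes and even the final 1x1 level still occupies one
  full 8-byte block.
*/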
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
int pixel_size,ExceptionInfo *exception)
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) w * h * pixel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w = DIV2(w);
h = DIV2(h);
if ((w == 1) && (h == 1))
break;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
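/*
  The loop above serializes eight 3-bit indices into 24 bits and emits
  them as three little-endian bytes.  E.g. indices 1,2,3,4,5,6,7,0 give
  value = 1 | 2<<3 | 3<<6 | 4<<9 | 5<<12 | 6<<15 | 7<<18 = 0x1F58D1,
  written as the bytes 0xD1, 0x58, 0x1F.
*/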
static void WriteCompressed(Image *image, const size_t count,
DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if (clusterFit == MagickFalse || count == 0)
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (!image->matte)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (IsStringTrue(option) != MagickFalse)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (IsStringTrue(option) != MagickFalse)
weightByAlpha=MagickTrue;
}
}
}
maxMipmaps=SIZE_MAX;
mipmaps=0;
if ((image->columns & (image->columns - 1)) == 0 &&
(image->rows & (image->rows - 1)) == 0)
{
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
&image->exception);
if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
clusterFit,weightByAlpha,&image->exception) == MagickFalse)
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MaxTextExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (format == DDPF_FOURCC)
flags=flags | DDSD_LINEARSIZE;
else
flags=flags | DDSD_PITCH;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->matte)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (pixelFormat == DDPF_FOURCC)
{
/* Compressed DDS requires linear compressed size of first image */
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
else /* DXT5 */
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
}
else
{
/* Uncompressed DDS requires byte pitch of first image */
if (image->matte != MagickFalse)
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
else
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
}
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) memset(software,0,sizeof(software));
(void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for (i=0; i < 5; i++)  /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->matte != MagickFalse)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for (i=0; i < 4; i++)  /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const PixelPacket *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(p));
else
alpha = 255;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
alphas[4*by + bx] = (ssize_t) alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p++;
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression, const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value,
  const size_t limit)
{
  ssize_t
    result = (ssize_t) (value + 0.5f);
  if (result < 0)
    return(0);
  if (result > (ssize_t) limit)
    return(limit);
  return((size_t) result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
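/*
  E.g. the unit-cube white point (1,1,1) clamps to (31,63,31) and packs
  to (31 << 11) | (63 << 5) | 31 = 0xFFFF, the same RGB565 layout the
  reader unpacks with the C565_* macros.
*/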
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char* indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
Image*
resize_image;
register ssize_t
i;
size_t
columns,
rows;
columns = image->columns;
rows = image->rows;
for (i=0; i< (ssize_t) mipmaps; i++)
{
resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,1.0,
exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
DestroyBlob(resize_image);
resize_image->blob=ReferenceBlob(image->blob);
WriteImageData(resize_image,pixelFormat,compression,clusterFit,
  weightByAlpha,exception);
resize_image=DestroyImage(resize_image);
columns = DIV2(columns);
rows = DIV2(rows);
}
return(MagickTrue);
}
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
const ssize_t* map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
if (image->matte)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p)));
p++;
}
}
}
|
par_relax.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "Common.h"
#include "_hypre_lapack.h"
#include "par_relax.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGRelax( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_type,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
HYPRE_Int relax_error = 0;
/*---------------------------------------------------------------------------------------
* Switch statement to direct control based on relax_type:
* relax_type = 0 -> Jacobi or CF-Jacobi
* relax_type = 1 -> Gauss-Seidel <--- very slow, sequential
* relax_type = 2 -> Gauss-Seidel: interior points in parallel,
* boundary sequential
* relax_type = 3 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (forward solve)
* relax_type = 4 -> hybrid: SOR-J mix off-processor, SOR on-processor
* with outer relaxation parameters (backward solve)
* relax_type = 5 -> hybrid: GS-J mix off-processor, chaotic GS on-node
* relax_type = 6 -> hybrid: SSOR-J mix off-processor, SSOR on-processor
* with outer relaxation parameters
* relax_type = 7 -> Jacobi (uses Matvec), only needed in CGNR [GPU-supported]
* relax_type = 8 -> hybrid L1 Symm. Gauss-Seidel
* relax_type = 9 -> Direct solve, Gaussian elimination
* relax_type = 10 -> On-processor direct forward solve for matrices with
* triangular structure (indices need not be ordered
* triangular)
* relax_type = 11 -> Two Stage approximation to GS. Uses the strict lower
* part of the diagonal matrix
* relax_type = 12 -> Two Stage approximation to GS. Uses the full diagonal
* matrix
* relax_type = 13 -> hybrid L1 Gauss-Seidel forward solve
* relax_type = 14 -> hybrid L1 Gauss-Seidel backward solve
* relax_type = 15 -> CG
* relax_type = 16 -> Scaled Chebyshev
* relax_type = 17 -> FCF-Jacobi
* relax_type = 18 -> L1-Jacobi [GPU-supported]
* relax_type = 19 -> Direct Solve, (old version)
* relax_type = 20 -> Kaczmarz
* relax_type = 29 -> Direct solve: use gaussian elimination & BLAS
* (with pivoting) (old version)
* relax_type = 98 -> Direct solve, Gaussian elimination
* relax_type = 99 -> Direct solve, Gaussian elimination
* relax_type = 199-> Direct solve, Gaussian elimination
*-------------------------------------------------------------------------------------*/
switch (relax_type)
{
case 0: /* Weighted Jacobi */
hypre_BoomerAMGRelax0WeightedJacobi(A, f, cf_marker, relax_points, relax_weight, u, Vtemp);
break;
case 1: /* Gauss-Seidel VERY SLOW */
hypre_BoomerAMGRelax1GaussSeidel(A, f, cf_marker, relax_points, u);
break;
case 2: /* Gauss-Seidel: relax interior points in parallel, boundary sequentially */
hypre_BoomerAMGRelax2GaussSeidel(A, f, cf_marker, relax_points, u);
break;
/* Hybrid: Jacobi off-processor, Gauss-Seidel on-processor (forward loop) */
case 3:
hypre_BoomerAMGRelax3HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
break;
case 4: /* Hybrid: Jacobi off-processor, Gauss-Seidel/SOR on-processor (backward loop) */
hypre_BoomerAMGRelax4HybridGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
break;
case 5: /* Hybrid: Jacobi off-processor, chaotic Gauss-Seidel on-processor */
hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel(A, f, cf_marker, relax_points, u);
break;
case 6: /* Hybrid: Jacobi off-processor, Symm. Gauss-Seidel/SSOR on-processor with outer relaxation parameter */
hypre_BoomerAMGRelax6HybridSSOR(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
break;
case 7: /* Jacobi (uses ParMatvec) */
hypre_BoomerAMGRelax7Jacobi(A, f, relax_points, relax_weight, l1_norms, u, Vtemp);
break;
case 8: /* hybrid L1 Symm. Gauss-Seidel */
hypre_BoomerAMGRelax8HybridL1SSOR(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp);
break;
/* Hybrid: Jacobi off-processor, ordered Gauss-Seidel on-processor */
case 10:
hypre_BoomerAMGRelax10TopoOrderedGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
break;
case 11: /* Two Stage Gauss Seidel. Forward sweep only */
hypre_BoomerAMGRelax11TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
break;
case 12: /* Two Stage Gauss Seidel. Uses the diagonal matrix for the GS part */
hypre_BoomerAMGRelax12TwoStageGaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, u, Vtemp, Ztemp);
break;
case 13: /* hybrid L1 Gauss-Seidel forward solve */
hypre_BoomerAMGRelax13HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp);
break;
case 14: /* hybrid L1 Gauss-Seidel backward solve */
hypre_BoomerAMGRelax14HybridL1GaussSeidel(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp);
break;
case 18: /* weighted L1 Jacobi */
hypre_BoomerAMGRelax18WeightedL1Jacobi(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp);
break;
case 19: /* Direct solve: use gaussian elimination */
relax_error = hypre_BoomerAMGRelax19GaussElim(A, f, u);
break;
case 20: /* Kaczmarz */
hypre_BoomerAMGRelaxKaczmarz(A, f, omega, l1_norms, u);
break;
case 98: /* Direct solve: use gaussian elimination & BLAS (with pivoting) */
relax_error = hypre_BoomerAMGRelax98GaussElimPivot(A, f, u);
break;
}
return relax_error;
}
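/*
 * Note on the _core routines below (a reading aid, not hypre documentation):
 * when Skip_diag is 1 the diagonal entry, stored first in each row of A_diag,
 * is left out of the residual sum and the update blends with the previous
 * iterate through (1 - relax_weight); when Skip_diag is 0 the full row is
 * used, the form needed when the scaling comes from l1_norms instead of a_ii.
 *
 * Illustrative sketch (hypothetical names, dense row-major storage): the
 * weighted Jacobi update applied by relax_type 0,
 *    u_i <- (1 - w) * u_i + w * (f_i - sum_{j != i} a_ij * u_j) / a_ii .
 * v is caller-provided scratch of length n playing the role of Vtemp, since
 * the sweep must read the old iterate while u is overwritten.
 */
static void example_weighted_jacobi_sweep(int n, const double *a,
                                          const double *f, double w,
                                          double *u, double *v)
{
   int i, j;
   for (i = 0; i < n; i++) { v[i] = u[i]; } /* save old iterate */
   for (i = 0; i < n; i++)
   {
      double res = f[i];
      for (j = 0; j < n; j++)
      {
         if (j != i) { res -= a[i * n + j] * v[j]; }
      }
      u[i] = (1.0 - w) * u[i] + w * res / a[i * n + i];
   }
}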
HYPRE_Int
hypre_BoomerAMGRelaxWeightedJacobi_core( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
HYPRE_Int Skip_diag )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Complex *v_ext_data = NULL;
HYPRE_Complex *v_buf_data = NULL;
HYPRE_Complex zero = 0.0;
HYPRE_Real one_minus_weight = 1.0 - relax_weight;
HYPRE_Complex res;
HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
Vtemp_data[i] = u_data[i];
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
const HYPRE_Complex di = l1_norms ? l1_norms[i] : A_diag_data[A_diag_i[i]];
/*-----------------------------------------------------------
* If i is of the right type ( C or F or All ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------*/
if ( (relax_points == 0 || cf_marker[i] == relax_points) && di != zero )
{
res = f_data[i];
for (jj = A_diag_i[i] + Skip_diag; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * v_ext_data[ii];
}
if (Skip_diag)
{
u_data[i] *= one_minus_weight;
u_data[i] += relax_weight * res / di;
}
else
{
u_data[i] += relax_weight * res / di;
}
}
}
if (num_procs > 1)
{
hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax0WeightedJacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, NULL, u, Vtemp, 1);
}
HYPRE_Int
hypre_BoomerAMGRelax18WeightedL1Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
// TODO implement CF relax on GPUs
if (relax_points != 0)
{
exec = HYPRE_EXEC_HOST;
}
if (exec == HYPRE_EXEC_DEVICE)
{
// XXX GPU calls Relax7 XXX
return hypre_BoomerAMGRelax7Jacobi(A, f, relax_points, relax_weight, l1_norms, u, Vtemp);
}
else
#endif
{
/* in the case of non-CF, use relax-7 which is faster */
if (relax_points == 0)
{
return hypre_BoomerAMGRelax7Jacobi(A, f, relax_points, relax_weight, l1_norms, u, Vtemp);
}
else
{
return hypre_BoomerAMGRelaxWeightedJacobi_core(A, f, cf_marker, relax_points, relax_weight, l1_norms, u, Vtemp, 0);
}
}
}
HYPRE_Int
hypre_BoomerAMGRelax1GaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
HYPRE_Complex *v_ext_data = NULL;
HYPRE_Complex *v_buf_data = NULL;
HYPRE_Complex zero = 0.0;
HYPRE_Complex res;
HYPRE_Int num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len;
hypre_MPI_Status *status;
hypre_MPI_Request *requests;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
for (p = 0; p < num_procs; p++)
{
jr = 0;
if (p != my_id)
{
for (i = 0; i < num_sends; i++)
{
ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (ip == p)
{
vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
for (j = vec_start; j < vec_start+vec_len; j++)
{
v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
}
}
hypre_MPI_Waitall(jr, requests, status);
hypre_MPI_Barrier(comm);
}
else
{
if (num_procs > 1)
{
for (i = 0; i < num_recvs; i++)
{
ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1) - vec_start;
hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
}
hypre_MPI_Waitall(jr, requests, status);
}
for (i = 0; i < num_rows; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------*/
if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero )
{
res = f_data[i];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * v_ext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
if (num_procs > 1)
{
hypre_MPI_Barrier(comm);
}
}
}
if (num_procs > 1)
{
hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax2GaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
HYPRE_Complex *v_ext_data = NULL;
HYPRE_Complex *v_buf_data = NULL;
HYPRE_Complex zero = 0.0;
HYPRE_Complex res;
HYPRE_Int num_procs, my_id, i, j, ii, jj, p, jr, ip, num_sends, num_recvs, vec_start, vec_len;
hypre_MPI_Status *status;
hypre_MPI_Request *requests;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_recvs+num_sends, HYPRE_MEMORY_HOST);
requests = hypre_CTAlloc(hypre_MPI_Request, num_recvs+num_sends, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------
* Relax interior points first
*-----------------------------------------------------------------*/
for (i = 0; i < num_rows; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F or All ) and diagonal is
* nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i+1] - A_offd_i[i] == zero &&
A_diag_data[A_diag_i[i]] != zero )
{
res = f_data[i];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
for (p = 0; p < num_procs; p++)
{
jr = 0;
if (p != my_id)
{
for (i = 0; i < num_sends; i++)
{
ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (ip == p)
{
vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
for (j = vec_start; j < vec_start+vec_len; j++)
{
v_buf_data[j] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
hypre_MPI_Isend(&v_buf_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
}
}
hypre_MPI_Waitall(jr, requests, status);
hypre_MPI_Barrier(comm);
}
else
{
if (num_procs > 1)
{
for (i = 0; i < num_recvs; i++)
{
ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i);
vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg,i+1) - vec_start;
hypre_MPI_Irecv(&v_ext_data[vec_start], vec_len, HYPRE_MPI_REAL, ip, 0, comm, &requests[jr++]);
}
hypre_MPI_Waitall(jr, requests, status);
}
for (i = 0; i < num_rows; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F or All) and diagonal is
* nonzero, relax point i; otherwise, skip it.
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------*/
if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_offd_i[i+1] - A_offd_i[i] != zero &&
A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * v_ext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
if (num_procs > 1)
{
hypre_MPI_Barrier(comm);
}
}
}
if (num_procs > 1)
{
hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelaxHybridGaussSeidel_core( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp,
HYPRE_Int GS_order,
HYPRE_Int Symm,
HYPRE_Int Skip_diag,
HYPRE_Int forced_seq,
HYPRE_Int Topo_order )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = Vtemp ? hypre_ParVectorLocalVector(Vtemp) : NULL;
HYPRE_Complex *Vtemp_data = Vtemp_local ? hypre_VectorData(Vtemp_local) : NULL;
/*
hypre_Vector *Ztemp_local = NULL;
HYPRE_Complex *Ztemp_data = NULL;
*/
HYPRE_Complex *v_ext_data = NULL;
HYPRE_Complex *v_buf_data = NULL;
HYPRE_Int *proc_ordering = NULL;
const HYPRE_Real one_minus_omega = 1.0 - omega;
HYPRE_Int num_procs, my_id, num_threads, j, num_sends;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_threads = forced_seq ? 1 : hypre_NumThreads();
/* GS order: forward or backward */
const HYPRE_Int gs_order = GS_order > 0 ? 1 : -1;
/* for symmetric GS, a forward followed by a backward */
const HYPRE_Int num_sweeps = Symm ? 2 : 1;
/* if relax_weight and omega are both 1.0 */
const HYPRE_Int non_scale = relax_weight == 1.0 && omega == 1.0;
   /* coefficient 1 - relax_weight * omega used by the scaled update */
   const HYPRE_Real prod = 1.0 - relax_weight * omega;
/*
if (num_threads > 1)
{
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
}
*/
#if defined(HYPRE_USING_PERSISTENT_COMM)
// JSP: persistent comm can be similarly used for other smoothers
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (num_procs > 1)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
#if defined(HYPRE_USING_PERSISTENT_COMM)
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
v_buf_data = (HYPRE_Real *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
v_ext_data = (HYPRE_Real *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
#else
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
#endif
HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (j = begin; j < end; j++)
{
v_buf_data[j - begin] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_HOST, v_buf_data);
#else
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
#endif
#if defined(HYPRE_USING_PERSISTENT_COMM)
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_HOST, v_ext_data);
#else
hypre_ParCSRCommHandleDestroy(comm_handle);
#endif
comm_handle = NULL;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
}
if (Topo_order)
{
/* Check for ordering of matrix. If stored, get pointer, otherwise
* compute ordering and point matrix variable to array.
* Used in AIR
*/
if (!hypre_ParCSRMatrixProcOrdering(A))
{
proc_ordering = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
hypre_topo_sort(A_diag_i, A_diag_j, A_diag_data, proc_ordering, num_rows);
hypre_ParCSRMatrixProcOrdering(A) = proc_ordering;
}
else
{
proc_ordering = hypre_ParCSRMatrixProcOrdering(A);
}
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] -= hypre_MPI_Wtime();
#endif
if ( (num_threads > 1 || !non_scale) && Vtemp_data )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_rows; j++)
{
Vtemp_data[j] = u_data[j];
}
}
if (num_threads > 1)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
HYPRE_Int ns, ne, sweep;
hypre_partition1D(num_rows, num_threads, j, &ns, &ne);
for (sweep = 0; sweep < num_sweeps; sweep++)
{
const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;
const HYPRE_Int ibegin = iorder > 0 ? ns : ne - 1;
const HYPRE_Int iend = iorder > 0 ? ne : ns - 1;
if (non_scale)
{
hypre_HybridGaussSeidelNSThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data,
ns, ne, ibegin, iend, iorder, Skip_diag);
}
else
{
hypre_HybridGaussSeidelThreads(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega,
prod, l1_norms, u_data, Vtemp_data, v_ext_data, ns, ne, ibegin, iend, iorder, Skip_diag);
}
} /* for (sweep = 0; sweep < num_sweeps; sweep++) */
} /* for (j = 0; j < num_threads; j++) */
}
else /* if (num_threads > 1) */
{
HYPRE_Int sweep;
for (sweep = 0; sweep < num_sweeps; sweep++)
{
const HYPRE_Int iorder = num_sweeps == 1 ? gs_order : sweep == 0 ? 1 : -1;
const HYPRE_Int ibegin = iorder > 0 ? 0 : num_rows - 1;
const HYPRE_Int iend = iorder > 0 ? num_rows : -1;
if (Topo_order)
{
hypre_HybridGaussSeidelOrderedNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, u_data, NULL, v_ext_data,
ibegin, iend, iorder, proc_ordering);
}
else
{
if (non_scale)
{
hypre_HybridGaussSeidelNS(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, l1_norms, u_data, Vtemp_data, v_ext_data,
ibegin, iend, iorder, Skip_diag);
}
else
{
hypre_HybridGaussSeidel(A_diag_i, A_diag_j, A_diag_data, A_offd_i, A_offd_j, A_offd_data,
f_data, cf_marker, relax_points, relax_weight, omega, one_minus_omega,
prod, l1_norms, u_data, Vtemp_data, v_ext_data, ibegin, iend, iorder, Skip_diag);
}
}
} /* for (sweep = 0; sweep < num_sweeps; sweep++) */
} /* if (num_threads > 1) */
#ifndef HYPRE_USING_PERSISTENT_COMM
if (num_procs > 1)
{
hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RELAX] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
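/*
 * Illustrative sketch (hypothetical names): the on-process half of one
 * forward hybrid Gauss-Seidel sweep over a CSR matrix whose diagonal entry
 * is stored first in each row, as in hypre's A_diag. The off-process
 * couplings, which the routine above handles with v_ext_data from the halo
 * exchange, are omitted here; keeping them at their old values is exactly
 * what makes the method "hybrid": Gauss-Seidel inside a process, Jacobi
 * across processes.
 */
static void example_forward_gs_sweep_csr(int n, const int *ia, const int *ja,
                                         const double *va, const double *f,
                                         double *u)
{
   int i, jj;
   for (i = 0; i < n; i++)
   {
      double d = va[ia[i]]; /* diagonal stored first in the row */
      double res = f[i];
      for (jj = ia[i] + 1; jj < ia[i + 1]; jj++)
      {
         res -= va[jj] * u[ja[jj]]; /* uses already-updated values */
      }
      if (d != 0.0) { u[i] = res / d; }
   }
}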
/* forward hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax3HybridGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
// TODO implement CF relax on GPUs
if (relax_points != 0)
{
exec = HYPRE_EXEC_HOST;
}
if (exec == HYPRE_EXEC_DEVICE)
{
return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
1 /* forward */, 0 /* nonsymm */);
}
else
#endif
{
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
1 /* forward */, 0 /* nonsymm */, 1 /* skip diag */, 0, 0);
}
}
/* backward hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax4HybridGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
// TODO implement CF relax on GPUs
if (relax_points != 0)
{
exec = HYPRE_EXEC_HOST;
}
if (exec == HYPRE_EXEC_DEVICE)
{
return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
-1 /* backward */, 0 /* nonsymm */);
}
else
#endif
{
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
                                                      -1 /* backward */, 0 /* nonsymm */, 1 /* skip diag */, 0, 0);
}
}
/* chaotic forward G-S */
HYPRE_Int
hypre_BoomerAMGRelax5ChaoticHybridGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
HYPRE_Complex *v_ext_data = NULL;
HYPRE_Complex *v_buf_data = NULL;
HYPRE_Complex zero = 0.0;
HYPRE_Complex res;
HYPRE_Int num_procs, my_id, i, j, ii, jj, index, num_sends, start;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
v_ext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
{
v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, v_ext_data);
/*-----------------------------------------------------------------
* Complete the halo exchange before the sweep begins.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
/*-----------------------------------------------------------
* If i is of the right type ( C or F or All) and diagonal is
* nonzero, relax point i; otherwise, skip it.
* Relax only C or F points as determined by relax_points.
*-----------------------------------------------------------*/
if ( (relax_points == 0 || cf_marker[i] == relax_points) && A_diag_data[A_diag_i[i]] != zero )
{
res = f_data[i];
for (jj = A_diag_i[i] + 1; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * u_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * v_ext_data[ii];
}
u_data[i] = res / A_diag_data[A_diag_i[i]];
}
}
if (num_procs > 1)
{
hypre_TFree(v_ext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
/* symmetric hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax6HybridSSOR( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
// TODO implement CF relax on GPUs
if (relax_points != 0)
{
exec = HYPRE_EXEC_HOST;
}
if (exec == HYPRE_EXEC_DEVICE)
{
return hypre_BoomerAMGRelaxHybridGaussSeidelDevice(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
1, 1 /* symm */);
}
else
#endif
{
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
1, 1 /* symm */, 1 /* skip diag */, 0, 0);
}
}
HYPRE_Int
hypre_BoomerAMGRelax7Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_Vector l1_norms_vec;
hypre_ParVector l1_norms_parvec;
hypre_VectorData(&l1_norms_vec) = l1_norms;
hypre_VectorSize(&l1_norms_vec) = num_rows;
/* TODO XXX
* The next line is NOT 100% correct, which should be the memory location of l1_norms instead of f
* But how do I know it? As said, don't use raw pointers, don't use raw pointers!
* It is fine normally since A, f, and l1_norms should live in the same memory space
*/
hypre_VectorMemoryLocation(&l1_norms_vec) = hypre_ParVectorMemoryLocation(f);
hypre_ParVectorLocalVector(&l1_norms_parvec) = &l1_norms_vec;
#if defined(HYPRE_USING_CUDA)
HYPRE_Int sync_stream;
hypre_GetSyncCudaCompute(&sync_stream);
hypre_SetSyncCudaCompute(0);
#endif
/*-----------------------------------------------------------------
* Copy f into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParVectorCopy(f, Vtemp);
/*-----------------------------------------------------------------
* Perform Matvec Vtemp = w * (f - Au)
*-----------------------------------------------------------------*/
hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, Vtemp);
/*-----------------------------------------------------------------
* u += D^{-1} * Vtemp, where D_ii = ||A(i,:)||_1
*-----------------------------------------------------------------*/
hypre_ParVectorElmdivpy(Vtemp, &l1_norms_parvec, u);
#if defined(HYPRE_USING_CUDA)
hypre_SetSyncCudaCompute(sync_stream);
hypre_SyncCudaComputeStream(hypre_handle());
#endif
return hypre_error_flag;
}
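/*
 * Illustrative sketch (hypothetical names): the l1 row norms consumed above,
 *    l1_norms[i] = sum_j |a_ij| ,
 * taken over both the diag and offd parts of the ParCSR matrix. With this
 * scaling the sweep u += w * D_l1^{-1} (f - A u) is convergent for SPD
 * matrices even with w = 1, which is the usual motivation for the l1
 * variants.
 */
#include <math.h> /* fabs; harmless if already included */

static void example_compute_l1_norms(int n,
                                     const int *diag_i, const double *diag_a,
                                     const int *offd_i, const double *offd_a,
                                     double *l1_norms)
{
   int i, jj;
   for (i = 0; i < n; i++)
   {
      double s = 0.0;
      for (jj = diag_i[i]; jj < diag_i[i + 1]; jj++) { s += fabs(diag_a[jj]); }
      for (jj = offd_i[i]; jj < offd_i[i + 1]; jj++) { s += fabs(offd_a[jj]); }
      l1_norms[i] = s;
   }
}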
/* symmetric l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax8HybridL1SSOR( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1;
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp,
1, 1 /* symm */, skip_diag, 0, 0);
}
/* forward hybrid topology ordered G-S */
HYPRE_Int
hypre_BoomerAMGRelax10TopoOrderedGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, NULL, u, Vtemp, Ztemp,
1 /* forward */, 0 /* nonsymm */, 1 /* skip_diag */, 1, 1);
}
/* forward l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax13HybridL1GaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1;
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp,
1 /* forward */, 0 /* nonsymm */, skip_diag, 0, 0 );
}
/* backward l1 hybrid G-S */
HYPRE_Int
hypre_BoomerAMGRelax14HybridL1GaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
const HYPRE_Int skip_diag = relax_weight == 1.0 && omega == 1.0 ? 0 : 1;
return hypre_BoomerAMGRelaxHybridGaussSeidel_core(A, f, cf_marker, relax_points, relax_weight, omega, l1_norms, u, Vtemp, Ztemp,
-1 /* backward */, 0 /* nonsymm */, skip_diag, 0, 0 );
}
HYPRE_Int
hypre_BoomerAMGRelax19GaussElim( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u);
HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
HYPRE_Int first_index = (HYPRE_Int) first_ind;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_CSRMatrix *A_CSR;
HYPRE_Int *A_CSR_i;
HYPRE_Int *A_CSR_j;
HYPRE_Real *A_CSR_data;
hypre_Vector *f_vector;
HYPRE_Real *f_vector_data;
HYPRE_Real *A_mat;
HYPRE_Real *b_vec;
HYPRE_Int i, jj, column, relax_error = 0;
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
if (num_rows)
{
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
column = A_CSR_j[jj];
A_mat[i*n_global+column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
hypre_gselim(A_mat, b_vec, n_global, relax_error);
for (i = 0; i < num_rows; i++)
{
u_data[i] = b_vec[first_index + i];
}
hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
return relax_error;
}
HYPRE_Int
hypre_BoomerAMGRelax98GaussElimPivot( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt first_ind = hypre_ParVectorFirstIndex(u);
HYPRE_Int n_global = (HYPRE_Int) global_num_rows;
HYPRE_Int first_index = (HYPRE_Int) first_ind;
HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_CSRMatrix *A_CSR;
HYPRE_Int *A_CSR_i;
HYPRE_Int *A_CSR_j;
HYPRE_Real *A_CSR_data;
hypre_Vector *f_vector;
HYPRE_Real *f_vector_data;
HYPRE_Real *A_mat;
HYPRE_Real *b_vec;
HYPRE_Int i, jj, column, relax_error = 0;
HYPRE_Int info;
HYPRE_Int one_i = 1;
HYPRE_Int *piv;
/*-----------------------------------------------------------------
* Generate CSR matrix from ParCSRMatrix A
*-----------------------------------------------------------------*/
/* all processors are needed for these routines */
A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
f_vector = hypre_ParVectorToVectorAll(f);
if (num_rows)
{
A_CSR_i = hypre_CSRMatrixI(A_CSR);
A_CSR_j = hypre_CSRMatrixJ(A_CSR);
A_CSR_data = hypre_CSRMatrixData(A_CSR);
f_vector_data = hypre_VectorData(f_vector);
A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global, HYPRE_MEMORY_HOST);
b_vec = hypre_CTAlloc(HYPRE_Real, n_global, HYPRE_MEMORY_HOST);
/*---------------------------------------------------------------
* Load CSR matrix into A_mat.
*---------------------------------------------------------------*/
for (i = 0; i < n_global; i++)
{
for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
{
/* need col major: the LAPACK-style routines below expect Fortran storage */
column = A_CSR_j[jj];
A_mat[i + n_global*column] = A_CSR_data[jj];
}
b_vec[i] = f_vector_data[i];
}
piv = hypre_CTAlloc(HYPRE_Int, n_global, HYPRE_MEMORY_HOST);
/* write over A with LU */
hypre_dgetrf(&n_global, &n_global, A_mat, &n_global, piv, &info);
/*now b_vec = inv(A)*b_vec */
hypre_dgetrs("N", &n_global, &one_i, A_mat, &n_global, piv, b_vec, &n_global, &info);
hypre_TFree(piv, HYPRE_MEMORY_HOST);
for (i = 0; i < num_rows; i++)
{
u_data[i] = b_vec[first_index+i];
}
hypre_TFree(A_mat, HYPRE_MEMORY_HOST);
hypre_TFree(b_vec, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
else
{
hypre_CSRMatrixDestroy(A_CSR);
A_CSR = NULL;
hypre_SeqVectorDestroy(f_vector);
f_vector = NULL;
}
return relax_error;
}
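/*
 * Note on the two direct solves above: relax 19 loads the gathered matrix
 * row-major, A_mat[i*n + j], for hypre_gselim, while relax 98 loads it
 * column-major, A_mat[i + n*j], because hypre_dgetrf/hypre_dgetrs follow
 * LAPACK's Fortran storage convention. Illustrative helper (hypothetical
 * name) converting between the two layouts:
 */
static void example_row_to_col_major(int n, const double *rm, double *cm)
{
   int i, j;
   for (i = 0; i < n; i++)
   {
      for (j = 0; j < n; j++)
      {
         cm[i + n * j] = rm[i * n + j]; /* entry (i,j) in Fortran order */
      }
   }
}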
HYPRE_Int
hypre_BoomerAMGRelaxKaczmarz( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Real omega,
HYPRE_Real *l1_norms,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Complex *f_data = hypre_VectorData(f_local);
HYPRE_Complex *u_offd_data = NULL;
HYPRE_Complex *u_buf_data = NULL;
HYPRE_Complex res;
HYPRE_Int num_procs, my_id, i, j, index, num_sends, start;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (num_procs > 1)
{
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
u_buf_data = hypre_TAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, u_buf_data, u_offd_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
}
/* Forward local pass */
for (i = 0; i < num_rows; i++)
{
res = f_data[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
res -= A_diag_data[j] * u_data[A_diag_j[j]];
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
res -= A_offd_data[j] * u_offd_data[A_offd_j[j]];
}
res /= l1_norms[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
u_data[A_diag_j[j]] += omega * res * A_diag_data[j];
}
}
/* Backward local pass */
for (i = num_rows - 1; i > -1; i--)
{
res = f_data[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
res -= A_diag_data[j] * u_data[A_diag_j[j]];
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
res -= A_offd_data[j] * u_offd_data[A_offd_j[j]];
}
res /= l1_norms[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
u_data[A_diag_j[j]] += omega * res * A_diag_data[j];
}
}
hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
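/*
 * Illustrative sketch (hypothetical names, dense storage): one classical
 * Kaczmarz sweep. Each step projects u onto the hyperplane of equation i,
 *    u <- u + omega * (f_i - a_i . u) / ||a_i||_2^2 * a_i .
 * The routine above replaces the row norm ||a_i||_2^2 by the precomputed
 * l1_norms[i] and adds a mirrored backward pass.
 */
static void example_kaczmarz_sweep(int n, const double *a, const double *f,
                                   double omega, double *u)
{
   int i, j;
   for (i = 0; i < n; i++)
   {
      double res = f[i], nrm2 = 0.0;
      for (j = 0; j < n; j++)
      {
         res  -= a[i * n + j] * u[j];
         nrm2 += a[i * n + j] * a[i * n + j];
      }
      if (nrm2 > 0.0)
      {
         for (j = 0; j < n; j++) { u[j] += omega * res * a[i * n + j] / nrm2; }
      }
   }
}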
HYPRE_Int
hypre_BoomerAMGRelax11TwoStageGaussSeidelHost( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
HYPRE_Int i, jj, ii;
hypre_ParCSRMatrixMatvecOutOfPlace(-relax_weight, A, u, relax_weight, f, Vtemp);
/* Check first that EVERY diagonal entry is nonzero; if any is zero, flag an error */
for (i = 0; i < num_rows; i++)
{
if (A_diag_data[A_diag_i[i]] == 0.0)
{
hypre_error_in_arg(1);
}
}
for (i = 0; i < num_rows; i++) /* Run the smoother */
{
HYPRE_Complex res = 0.0;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii < i)
{
res -= (A_diag_data[jj] / A_diag_data[A_diag_i[ii]]) * Vtemp_data[ii];
}
}
u_data[i] += (Vtemp_data[i] + omega*res) / A_diag_data[A_diag_i[i]];
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax11TwoStageGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 1);
}
else
#endif
{
hypre_BoomerAMGRelax11TwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp);
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax12TwoStageGaussSeidelHost( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Complex *Vtemp_data = hypre_VectorData(Vtemp_local);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Complex *u_data = hypre_VectorData(u_local);
HYPRE_Int i, jj, ii;
hypre_ParCSRMatrixMatvecOutOfPlace(-relax_weight, A, u, relax_weight, f, Vtemp);
/* Check first that EVERY diagonal entry is nonzero; if any is zero, flag an error */
for (i = 0; i < num_rows; i++)
{
if (A_diag_data[A_diag_i[i]] == 0.0)
{
hypre_error_in_arg(1);
}
}
for (i = 0; i < num_rows; i++) /* Run the smoother */
{
HYPRE_Complex res = Vtemp_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= (A_diag_data[jj] / A_diag_data[A_diag_i[ii]]) * Vtemp_data[ii];
}
u_data[i] += (Vtemp_data[i] + omega*res) / A_diag_data[A_diag_i[i]];
}
return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGRelax12TwoStageGaussSeidel( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
HYPRE_Int *cf_marker,
HYPRE_Int relax_points,
HYPRE_Real relax_weight,
HYPRE_Real omega,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp )
{
#if defined(HYPRE_USING_CUDA)
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
//RL: TODO back to hypre_GetExecPolicy2 later
HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_BoomerAMGRelaxTwoStageGaussSeidelDevice(A, f, relax_weight, omega, u, Vtemp, Ztemp, 0);
}
else
#endif
{
hypre_BoomerAMGRelax12TwoStageGaussSeidelHost(A, f, relax_weight, omega, u, Vtemp);
}
return hypre_error_flag;
}
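/*
 * Note on relax types 11 and 12 above: both avoid the sequential triangular
 * solve of true Gauss-Seidel, (D + L)^{-1} r, by taking the first two terms
 * of its Neumann series,
 *    (D + L)^{-1} ~= D^{-1} - D^{-1} L D^{-1} ,
 * so a sweep becomes two Jacobi-like passes that parallelize freely. Relax 11
 * restricts the correction to the strictly lower triangle; relax 12 runs it
 * over the whole row. Illustrative dense sketch (hypothetical name) of the
 * relax-11 update, with r = relax_weight * (f - A u) already in hand:
 */
static void example_two_stage_gs_update(int n, const double *a,
                                        const double *r, double omega,
                                        double *u)
{
   int i, j;
   for (i = 0; i < n; i++)
   {
      double corr = 0.0;
      for (j = 0; j < i; j++) /* strictly lower part, as in relax 11 */
      {
         corr -= (a[i * n + j] / a[j * n + j]) * r[j];
      }
      u[i] += (r[i] + omega * corr) / a[i * n + i];
   }
}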
|
metadirective_device_kind_codegen.c | // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple x86_64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple aarch64-unknown-linux -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -triple ppc64le-unknown-linux -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void bar(void);
void foo(void) {
#pragma omp metadirective when(device = {kind(any)} \
: parallel)
bar();
#pragma omp metadirective when(device = {kind(host, cpu)} \
: parallel for num_threads(4))
for (int i = 0; i < 100; i++)
;
#pragma omp metadirective when(device = {kind(host)} \
: parallel for)
for (int i = 0; i < 100; i++)
;
#pragma omp metadirective when(device = {kind(nohost, gpu)} \
:) when(device = {kind(cpu)} \
: parallel)
bar();
#pragma omp metadirective when(device = {kind(any, cpu)} \
: parallel)
bar();
#pragma omp metadirective when(device = {kind(any, host)} \
: parallel)
bar();
#pragma omp metadirective when(device = {kind(gpu)} \
: target parallel for) default(parallel for)
for (int i = 0; i < 100; i++)
;
}
// CHECK-LABEL: define {{.+}} void @foo()
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_1:@.+]] to void
// CHECK-NEXT: @__kmpc_push_num_threads
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_2:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_3:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_4:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_5:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_6:@.+]] to void
// CHECK: @__kmpc_fork_call(%struct.ident_t* {{.+}}, i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* [[OUTLINED_7:@.+]] to void
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_1]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_2]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_3]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_4]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_5]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_6]](
// CHECK: call void @bar
// CHECK: ret void
// CHECK: define internal void [[OUTLINED_7]](
// CHECK: call void @__kmpc_for_static_init
// CHECK: call void @__kmpc_for_static_fini
// CHECK: ret void
#endif
|
hello2.c | #include <omp.h>
#include <stdio.h> /* printf */
int main (void) {
int nthreads, tid;
/* Fork a team of threads with each thread having a private tid variable */
#pragma omp parallel private(tid)
{
/* Obtain and print thread id */
tid = omp_get_thread_num();
printf("Hello World from thread = %d\n", tid);
/* Only master thread does this */
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
} /* All threads join master thread and terminate */
  return 0;
}
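/* A typical build and run of this example (command line is illustrative):
 *    gcc -fopenmp hello2.c -o hello2
 *    OMP_NUM_THREADS=4 ./hello2
 * Every thread prints its private tid; only thread 0 reports the team size. */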
|
problem.p6.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
void evaluateBeta(double x, double y, double z, double *B, double *Bx, double *By, double *Bz){
double Bmin = 1.0;
double Bmax = 10.0;
double c2 = (Bmax-Bmin)/2; // coefficients to affect this transition
double c1 = (Bmax+Bmin)/2;
double c3 = 10.0; // how sharply (B)eta transitions
double xcenter = 0.50;
double ycenter = 0.50;
double zcenter = 0.50;
// calculate distance from center of the domain (0.5,0.5,0.5)
double r2 = pow((x-xcenter),2) + pow((y-ycenter),2) + pow((z-zcenter),2);
double r2x = 2.0*(x-xcenter);
double r2y = 2.0*(y-ycenter);
double r2z = 2.0*(z-zcenter);
//double r2xx = 2.0;
//double r2yy = 2.0;
//double r2zz = 2.0;
double r = pow(r2,0.5);
double rx = 0.5*r2x*pow(r2,-0.5);
double ry = 0.5*r2y*pow(r2,-0.5);
double rz = 0.5*r2z*pow(r2,-0.5);
//double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5);
//double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5);
//double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*B = c1+c2*tanh( c3*(r-0.25) );
*Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2));
*By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2));
*Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2));
}
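// A reading aid (derivation, not part of the original source): the gradient
// lines above follow from the chain rule with d/ds tanh(s) = 1 - tanh(s)^2,
//   dB/dx = c2*c3*(1 - tanh(c3*(r-0.25))^2) * rx, where rx = (x-xcenter)/r,
// and symmetrically for By and Bz.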
//------------------------------------------------------------------------------------------------------------------------------
void evaluateU(double x, double y, double z, double *U, double *Ux, double *Uy, double *Uz, double *Uxx, double *Uyy, double *Uzz, int isPeriodic){
// should be continuous in u, u', u'', u''', and u'''' to guarantee high order and periodic boundaries
// v(w) = ???
// u(x,y,z) = v(x)v(y)v(z)
// If Periodic, then the integral of the RHS should sum to zero.
// Setting shift=1.0 should ensure that the integrals of X, Y, or Z should sum to zero...
// That should(?) make the integrals of u,ux,uy,uz,uxx,uyy,uzz sum to zero and thus make the integral of f sum to zero
// If dirichlet, then w(0)=w(1) = 0.0
// Setting shift to 0 should ensure that U(x,y,z) = 0 on boundary
// u = ax^6 + bx^5 + cx^4 + dx^3 + ex^2 + fx + g
// ux = 6ax^5 + 5bx^4 + 4cx^3 + 3dx^2 + 2ex + f
// uxx = 30ax^4 + 20bx^3 + 12cx^2 + 6dx + 2e
// a = 42.0
// b = -126.0
// c = 105.0
// d = 0.0
// e = -21.0
// f = 0.0
// g = 1.0
double shift = 0.0; if (isPeriodic) { shift = 1.0/21.0; }
double X = 2.0*pow(x,6) - 6.0*pow(x,5) + 5.0*pow(x,4) - 1.0*pow(x,2) + shift;
double Y = 2.0*pow(y,6) - 6.0*pow(y,5) + 5.0*pow(y,4) - 1.0*pow(y,2) + shift;
double Z = 2.0*pow(z,6) - 6.0*pow(z,5) + 5.0*pow(z,4) - 1.0*pow(z,2) + shift;
double Xx = 12.0*pow(x,5) - 30.0*pow(x,4) + 20.0*pow(x,3) - 2.0*x;
double Yy = 12.0*pow(y,5) - 30.0*pow(y,4) + 20.0*pow(y,3) - 2.0*y;
double Zz = 12.0*pow(z,5) - 30.0*pow(z,4) + 20.0*pow(z,3) - 2.0*z;
double Xxx = 60.0*pow(x,4) - 120.0*pow(x,3) + 60.0*pow(x,2) - 2.0;
double Yyy = 60.0*pow(y,4) - 120.0*pow(y,3) + 60.0*pow(y,2) - 2.0;
double Zzz = 60.0*pow(z,4) - 120.0*pow(z,3) + 60.0*pow(z,2) - 2.0;
*U = X * Y * Z;
*Ux = Xx * Y * Z;
*Uy = X * Yy * Z;
*Uz = X * Y * Zz;
*Uxx = Xxx * Y * Z;
*Uyy = X * Yyy * Z;
*Uzz = X * Y * Zzz;
}
//------------------------------------------------------------------------------------------------------------------------------
void initialize_problem(level_type * level, double hLevel, double a, double b){
level->h = hLevel;
int box;
for(box=0;box<level->num_my_boxes;box++){
int i,j,k;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int ghosts = level->my_boxes[box].ghosts;
const int dim_i = level->my_boxes[box].dim;
const int dim_j = level->my_boxes[box].dim;
const int dim_k = level->my_boxes[box].dim;
#ifdef _OPENMP
#pragma omp parallel for private(k,j,i) collapse(3)
#endif
for(k=0;k<=dim_k;k++){ // include high face
for(j=0;j<=dim_j;j++){ // include high face
for(i=0;i<=dim_i;i++){ // include high face
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
int ijk = (i+ghosts) + (j+ghosts)*jStride + (k+ghosts)*kStride;
double x = hLevel*( (double)(i+level->my_boxes[box].low.i) + 0.5 ); // +0.5 to get to the center of cell
double y = hLevel*( (double)(j+level->my_boxes[box].low.j) + 0.5 );
double z = hLevel*( (double)(k+level->my_boxes[box].low.k) + 0.5 );
double A,B,Bx,By,Bz,Bi,Bj,Bk;
double U,Ux,Uy,Uz,Uxx,Uyy,Uzz;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
A = 1.0;
B = 1.0;
Bx = 0.0;
By = 0.0;
Bz = 0.0;
Bi = 1.0;
Bj = 1.0;
Bk = 1.0;
#ifdef STENCIL_VARIABLE_COEFFICIENT // variable coefficient problem...
evaluateBeta(x-hLevel*0.5,y ,z ,&Bi,&Bx,&By,&Bz); // face-centered value of Beta for beta_i
evaluateBeta(x ,y-hLevel*0.5,z ,&Bj,&Bx,&By,&Bz); // face-centered value of Beta for beta_j
evaluateBeta(x ,y ,z-hLevel*0.5,&Bk,&Bx,&By,&Bz); // face-centered value of Beta for beta_k
evaluateBeta(x ,y ,z ,&B ,&Bx,&By,&Bz); // cell-centered value of Beta
#endif
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
evaluateU(x,y,z,&U,&Ux,&Uy,&Uz,&Uxx,&Uyy,&Uzz, (level->boundary_condition.type == BC_PERIODIC) );
double F = a*A*U - b*( (Bx*Ux + By*Uy + Bz*Uz) + B*(Uxx + Uyy + Uzz) );
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
level->my_boxes[box].vectors[VECTOR_BETA_I][ijk] = Bi;
level->my_boxes[box].vectors[VECTOR_BETA_J][ijk] = Bj;
level->my_boxes[box].vectors[VECTOR_BETA_K][ijk] = Bk;
level->my_boxes[box].vectors[VECTOR_ALPHA ][ijk] = A;
//level->my_boxes[box].vectors[VECTOR_UTRUE ][ijk] = U; // obviated by Richardson analysis
level->my_boxes[box].vectors[VECTOR_F ][ijk] = F;
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
}}}
}
}
//------------------------------------------------------------------------------------------------------------------------------
|
cholesky_omp.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* cholesky.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <stdlib.h> /* malloc, calloc, free, atoi */
#include <omp.h>
#include "util.h"
#include "papi.h"
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "cholesky.h"
double **I, **O;
double **Aux;
int size;
int nthreads;
/* Array initialization. */
static void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)){
  I = (double **)malloc(n * sizeof(double *)); /* sizeof(double *): rows are pointers */
  O = (double **)calloc(n, sizeof(double *));
  Aux = (double **)malloc(n * sizeof(double *));
int i, j;
for (i = 0; i < n; i++){
I[i] = (double*)malloc(n * sizeof(double));
O[i] = (double*)calloc(n, sizeof(double));
Aux[i] = (double*)malloc(n * sizeof(double));
for (j = 0; j <= i; j++){
A[i][j] = (DATA_TYPE)(-j % n) / n + 1;
I[i][j] = (DATA_TYPE)(-j % n) / n + 1;
// O[i][j] = (DATA_TYPE)(-j % n) / n + 1;
}
for (j = i+1; j < n; j++) {
A[i][j] = 0;
I[i][j] = A[i][j];
}
A[i][i] = 1;
I[i][i] = A[i][i];
}
/* Make the matrix positive semi-definite. */
int r,s,t;
for (r = 0; r < n; ++r)
for (s = 0; s < n; ++s)
Aux[r][s] = 0;
for (t = 0; t < n; ++t)
for (r = 0; r < n; ++r)
for (s = 0; s < n; ++s)
Aux[r][s] += I[r][t] * I[s][t];
for (r = 0; r < n; ++r)
for (s = 0; s < n; ++s){
I[r][s] = Aux[r][s];
}
free2D(Aux);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n)){
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("A");
for (i = 0; i < n; i++)
for (j = 0; j <= i; j++) {
if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, A[i][j]);
}
POLYBENCH_DUMP_END("A");
POLYBENCH_DUMP_FINISH;
}
static void cholesky_row_lower(){
int i, j, k;
for(k = 0; k < size; k++){
I[k][k] = sqrt(I[k][k]); /* sqrt, not sqrtf: I holds doubles */
for(j = (k + 1); j < size; j++){
I[k][j] /= I[k][k];
I[j][k] = I[k][j];
}
#pragma omp parallel for shared(I) private(i,j) num_threads(nthreads)
for(i = (k + 1); i < size; i++){
for(j = i; j < size; j++){
I[i][j] -= I[k][i] * I[k][j];
I[j][i] = I[i][j];
}
}
}
for(i = 0; i < size; i++){
for(j = i + 1; j < size; j++){
I[i][j] = 0.0;
}
}
}
static void cholesky_crout(){
int i, j, k;
double sum;
#pragma scop
for (j = 0; j < size; j++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += O[j][k] * O[j][k];
}
O[j][j] = SQRT_FUN(I[j][j] - sum);
#pragma omp parallel for private(i, k, sum) shared(I, O, j) num_threads(nthreads)
for (i = j + 1; i < size; i++) {
sum = 0;
for (k = 0; k < j; k++) {
sum += O[i][k] * O[j][k];
}
O[i][j] = (I[i][j] - sum) / O[j][j];
}
}
#pragma endscop
}
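/* The two kernels factor the same SPD input in different orders: the
 * row-lower variant is right-looking (each step k updates the whole trailing
 * submatrix, which is the parallel loop), while the Crout variant builds L
 * one column at a time, parallelizing over the rows below the diagonal. A
 * quick sanity check on either result is that L * L^T reproduces the input;
 * illustrative sketch (hypothetical name, uses fabs from <math.h>): */
static double example_check_cholesky(int n, double **A, double **L)
{
   int i, j, k;
   double s, e, max_err = 0.0;
   for (i = 0; i < n; i++)
   {
      for (j = 0; j <= i; j++)
      {
         s = 0.0;
         for (k = 0; k <= j; k++) { s += L[i][k] * L[j][k]; }
         e = fabs(A[i][j] - s);
         if (e > max_err) { max_err = e; }
      }
   }
   return max_err;
}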
int main(int argc, char** argv){
if(argc < 2){
printf("The program must have an argument to be executed. ./cholesky_omp.out $nthreads\n");
return -1;
}
nthreads = atoi(argv[1]);
/* Retrieve problem size. */
size = N;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, N, size, size);
/* Initialize array(s). */
init_array(size, POLYBENCH_ARRAY(A));
/* Start timer. */
polybench_start_instruments;
// printMatrix(I, size);
int counters[5] = {PAPI_L1_TCM, PAPI_L2_TCM, PAPI_L3_TCM, PAPI_TOT_CYC, PAPI_TOT_INS}, ret;
long long values[5];
// int counters[2] = {PAPI_TOT_CYC, PAPI_TOT_INS}, ret;
if ((ret = PAPI_start_counters(counters, 5)) != PAPI_OK) {
fprintf(stderr, "PAPI failed to start counters: %s\n", PAPI_strerror(ret));
exit(1);
}
BEGINTIME();
/* Run kernel. */
cholesky_row_lower();
// kernel_cholesky (n, POLYBENCH_ARRAY(A));
printf("ELAPSED TIME: ");
ENDTIME();
// printMatrix(I, size);
if ((ret = PAPI_read_counters(values, 5)) != PAPI_OK) {
fprintf(stderr, "PAPI failed to read counters: %s\n", PAPI_strerror(ret));
exit(1);
}
printf("TOTAL L1 MISS: %lld\n", values[0]);
printf("TOTAL L2 MISS: %lld\n", values[1]);
printf("TOTAL L3 MISS: %lld\n", values[2]);
printf("TOTAL CLOCK CYCLES: %lld\n", values[3]);
printf("TOTAL INSTRUCTIONS: %lld\n", values[4]);
printf("--------------------------------------\n");
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(size, POLYBENCH_ARRAY(A)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
free2D(I);
free2D(O);
return 0;
}
|
tree-pretty-print.c | /* Pretty formatting of GENERIC trees in C syntax.
Copyright (C) 2001-2020 Free Software Foundation, Inc.
Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "cgraph.h"
#include "tree-pretty-print.h"
#include "stor-layout.h"
#include "langhooks.h"
#include "tree-iterator.h"
#include "dumpfile.h"
#include "internal-fn.h"
#include "gomp-constants.h"
#include "gimple.h"
#include "fold-const.h"
/* Disable warnings about quoting issues in the pp_xxx calls below
that (intentionally) don't follow GCC diagnostic conventions. */
#if __GNUC__ >= 10
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wformat-diag"
#endif
/* Local functions, macros and variables. */
static const char *op_symbol (const_tree);
static void newline_and_indent (pretty_printer *, int);
static void maybe_init_pretty_print (FILE *);
static void print_struct_decl (pretty_printer *, const_tree, int, dump_flags_t);
static void do_niy (pretty_printer *, const_tree, dump_flags_t);
#define INDENT(SPACE) do { \
int i; for (i = 0; i<SPACE; i++) pp_space (pp); } while (0)
#define NIY do_niy (pp, node, flags)
static pretty_printer *tree_pp;
/* Try to print something for an unknown tree code. */
static void
do_niy (pretty_printer *pp, const_tree node, dump_flags_t flags)
{
int i, len;
pp_string (pp, "<<< Unknown tree: ");
pp_string (pp, get_tree_code_name (TREE_CODE (node)));
if (EXPR_P (node))
{
len = TREE_OPERAND_LENGTH (node);
for (i = 0; i < len; ++i)
{
newline_and_indent (pp, 2);
dump_generic_node (pp, TREE_OPERAND (node, i), 2, flags, false);
}
}
pp_string (pp, " >>>");
}
/* Debugging function to print out a generic expression. */
DEBUG_FUNCTION void
debug_generic_expr (tree t)
{
print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS);
fprintf (stderr, "\n");
}
/* Debugging function to print out a generic statement. */
DEBUG_FUNCTION void
debug_generic_stmt (tree t)
{
print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS);
fprintf (stderr, "\n");
}
/* Debugging function to print out a chain of trees. */
DEBUG_FUNCTION void
debug_tree_chain (tree t)
{
hash_set<tree> seen;
while (t)
{
print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
fprintf (stderr, " ");
t = TREE_CHAIN (t);
if (seen.add (t))
{
fprintf (stderr, "... [cycled back to ");
print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
fprintf (stderr, "]");
break;
}
}
fprintf (stderr, "\n");
}
/* Prints declaration DECL to the FILE with details specified by FLAGS. */
void
print_generic_decl (FILE *file, tree decl, dump_flags_t flags)
{
maybe_init_pretty_print (file);
print_declaration (tree_pp, decl, 2, flags);
pp_write_text_to_stream (tree_pp);
}
/* Print tree T, and its successors, on file FILE. FLAGS specifies details
to show in the dump. See TDF_* in dumpfile.h. */
void
print_generic_stmt (FILE *file, tree t, dump_flags_t flags)
{
maybe_init_pretty_print (file);
dump_generic_node (tree_pp, t, 0, flags, true);
pp_newline_and_flush (tree_pp);
}
/* Print tree T, and its successors, on file FILE. FLAGS specifies details
to show in the dump. See TDF_* in dumpfile.h. The output is indented by
INDENT spaces. */
void
print_generic_stmt_indented (FILE *file, tree t, dump_flags_t flags, int indent)
{
int i;
maybe_init_pretty_print (file);
for (i = 0; i < indent; i++)
pp_space (tree_pp);
dump_generic_node (tree_pp, t, indent, flags, true);
pp_newline_and_flush (tree_pp);
}
/* Print a single expression T on file FILE. FLAGS specifies details to show
in the dump. See TDF_* in dumpfile.h. */
void
print_generic_expr (FILE *file, tree t, dump_flags_t flags)
{
maybe_init_pretty_print (file);
dump_generic_node (tree_pp, t, 0, flags, false);
pp_flush (tree_pp);
}
/* Print a single expression T to string, and return it. */
char *
print_generic_expr_to_str (tree t)
{
pretty_printer pp;
dump_generic_node (&pp, t, 0, TDF_VOPS|TDF_MEMSYMS, false);
return xstrdup (pp_formatted_text (&pp));
}
/* Dump NAME, an IDENTIFIER_POINTER, sanitized so that D<num> sequences
in it are replaced with Dxxxx, as long as they are at the start or
preceded by $ and at the end or followed by $. See make_fancy_name
in tree-sra.c. */
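/* For instance, under the rule above "a$D1234$b" is printed as
"a$Dxxxx$b" and a bare "D42" as "Dxxxx", while "xD12" is left untouched
because its 'D' is neither at the start nor preceded by '$'. */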
static void
dump_fancy_name (pretty_printer *pp, tree name)
{
int cnt = 0;
int length = IDENTIFIER_LENGTH (name);
const char *n = IDENTIFIER_POINTER (name);
do
{
n = strchr (n, 'D');
if (n == NULL)
break;
if (ISDIGIT (n[1])
&& (n == IDENTIFIER_POINTER (name) || n[-1] == '$'))
{
int l = 2;
while (ISDIGIT (n[l]))
l++;
if (n[l] == '\0' || n[l] == '$')
{
cnt++;
length += 5 - l;
}
n += l;
}
else
n++;
}
while (1);
if (cnt == 0)
{
pp_tree_identifier (pp, name);
return;
}
char *str = XNEWVEC (char, length + 1);
char *p = str;
const char *q;
q = n = IDENTIFIER_POINTER (name);
do
{
q = strchr (q, 'D');
if (q == NULL)
break;
if (ISDIGIT (q[1])
&& (q == IDENTIFIER_POINTER (name) || q[-1] == '$'))
{
int l = 2;
while (ISDIGIT (q[l]))
l++;
if (q[l] == '\0' || q[l] == '$')
{
memcpy (p, n, q - n);
memcpy (p + (q - n), "Dxxxx", 5);
p += (q - n) + 5;
n = q + l;
}
q += l;
}
else
q++;
}
while (1);
memcpy (p, n, IDENTIFIER_LENGTH (name) - (n - IDENTIFIER_POINTER (name)));
str[length] = '\0';
if (pp_translate_identifiers (pp))
{
const char *text = identifier_to_locale (str);
pp_append_text (pp, text, text + strlen (text));
}
else
pp_append_text (pp, str, str + length);
XDELETEVEC (str);
}
/* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set
in FLAGS. */
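/* E.g. a nameless VAR_DECL with DECL_UID 42 prints as "D.42" ("D_42"
under TDF_GIMPLE, "D.xxxx" under TDF_NOUID); CONST_DECLs use 'C' instead
of 'D', and labels with a LABEL_DECL_UID print as "L.<uid>". */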
static void
dump_decl_name (pretty_printer *pp, tree node, dump_flags_t flags)
{
tree name = DECL_NAME (node);
if (name)
{
if ((flags & TDF_ASMNAME)
&& HAS_DECL_ASSEMBLER_NAME_P (node)
&& DECL_ASSEMBLER_NAME_SET_P (node))
pp_tree_identifier (pp, DECL_ASSEMBLER_NAME_RAW (node));
/* For -fcompare-debug don't dump DECL_NAMELESS names at all:
-g might have created more fancy names and their indexes
could get out of sync. Usually those should be DECL_IGNORED_P
too, but SRA can create even non-DECL_IGNORED_P DECL_NAMELESS fancy
names; let's hope those never get out of sync after doing the
dump_fancy_name sanitization. */
else if ((flags & TDF_COMPARE_DEBUG)
&& DECL_NAMELESS (node)
&& DECL_IGNORED_P (node))
name = NULL_TREE;
/* For DECL_NAMELESS names look for embedded uids in the
names and sanitize them for TDF_NOUID. */
else if ((flags & TDF_NOUID) && DECL_NAMELESS (node))
dump_fancy_name (pp, name);
else
pp_tree_identifier (pp, name);
}
char uid_sep = (flags & TDF_GIMPLE) ? '_' : '.';
if ((flags & TDF_UID) || name == NULL_TREE)
{
if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1)
pp_printf (pp, "L%c%d", uid_sep, (int) LABEL_DECL_UID (node));
else if (TREE_CODE (node) == DEBUG_EXPR_DECL)
{
if (flags & TDF_NOUID)
pp_string (pp, "D#xxxx");
else
pp_printf (pp, "D#%i", DEBUG_TEMP_UID (node));
}
else
{
char c = TREE_CODE (node) == CONST_DECL ? 'C' : 'D';
if (flags & TDF_NOUID)
pp_printf (pp, "%c.xxxx", c);
else
pp_printf (pp, "%c%c%u", c, uid_sep, DECL_UID (node));
}
}
if ((flags & TDF_ALIAS) && DECL_PT_UID (node) != DECL_UID (node))
{
if (flags & TDF_NOUID)
pp_printf (pp, "ptD.xxxx");
else
pp_printf (pp, "ptD.%u", DECL_PT_UID (node));
}
}
/* Like the above, but used for pretty printing function calls. */
static void
dump_function_name (pretty_printer *pp, tree node, dump_flags_t flags)
{
if (CONVERT_EXPR_P (node))
node = TREE_OPERAND (node, 0);
if (DECL_NAME (node) && (flags & TDF_ASMNAME) == 0)
pp_string (pp, lang_hooks.decl_printable_name (node, 1));
else
dump_decl_name (pp, node, flags);
}
/* Dump a function declaration. NODE is the FUNCTION_TYPE. PP, SPC and
FLAGS are as in dump_generic_node. */
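/* (Illustrative output, not verbatim: " (int, char *)" for a prototyped
function, " (void)" when the only entry is void_type_node, " (int, ...)"
for varargs, and " ()" for unprototyped functions.) */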
static void
dump_function_declaration (pretty_printer *pp, tree node,
int spc, dump_flags_t flags)
{
bool wrote_arg = false;
tree arg;
pp_space (pp);
pp_left_paren (pp);
/* Print the argument types. */
arg = TYPE_ARG_TYPES (node);
while (arg && arg != void_list_node && arg != error_mark_node)
{
if (wrote_arg)
{
pp_comma (pp);
pp_space (pp);
}
wrote_arg = true;
dump_generic_node (pp, TREE_VALUE (arg), spc, flags, false);
arg = TREE_CHAIN (arg);
}
/* A parameter list consisting only of void_type_node means "takes no
arguments"; print it as "void". If we already wrote arguments, the
trailing void_type_node is simply dropped. */
if (arg == void_list_node && !wrote_arg)
pp_string (pp, "void");
/* Properly dump vararg function types. */
else if (!arg && wrote_arg)
pp_string (pp, ", ...");
/* Avoid printing any arg for unprototyped functions. */
pp_right_paren (pp);
}
/* Dump the domain associated with an array. */
static void
dump_array_domain (pretty_printer *pp, tree domain, int spc, dump_flags_t flags)
{
pp_left_bracket (pp);
if (domain)
{
tree min = TYPE_MIN_VALUE (domain);
tree max = TYPE_MAX_VALUE (domain);
if (min && max
&& integer_zerop (min)
&& tree_fits_shwi_p (max))
pp_wide_integer (pp, tree_to_shwi (max) + 1);
else
{
if (min)
dump_generic_node (pp, min, spc, flags, false);
pp_colon (pp);
if (max)
dump_generic_node (pp, max, spc, flags, false);
}
}
else
pp_string (pp, "<unknown>");
pp_right_bracket (pp);
}
/* Dump OpenMP iterators ITER. */
static void
dump_omp_iterators (pretty_printer *pp, tree iter, int spc, dump_flags_t flags)
{
pp_string (pp, "iterator(");
for (tree it = iter; it; it = TREE_CHAIN (it))
{
if (it != iter)
pp_string (pp, ", ");
dump_generic_node (pp, TREE_TYPE (TREE_VEC_ELT (it, 0)), spc, flags,
false);
pp_space (pp);
dump_generic_node (pp, TREE_VEC_ELT (it, 0), spc, flags, false);
pp_equal (pp);
dump_generic_node (pp, TREE_VEC_ELT (it, 1), spc, flags, false);
pp_colon (pp);
dump_generic_node (pp, TREE_VEC_ELT (it, 2), spc, flags, false);
pp_colon (pp);
dump_generic_node (pp, TREE_VEC_ELT (it, 3), spc, flags, false);
}
pp_right_paren (pp);
}
/* Dump OpenMP clause CLAUSE. PP, CLAUSE, SPC and FLAGS are as in
dump_generic_node. */
static void
dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
{
const char *name;
const char *modifier = NULL;
switch (OMP_CLAUSE_CODE (clause))
{
case OMP_CLAUSE_PRIVATE:
name = "private";
goto print_remap;
case OMP_CLAUSE_SHARED:
name = "shared";
goto print_remap;
case OMP_CLAUSE_FIRSTPRIVATE:
name = "firstprivate";
goto print_remap;
case OMP_CLAUSE_LASTPRIVATE:
name = "lastprivate";
if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clause))
modifier = "conditional:";
goto print_remap;
case OMP_CLAUSE_COPYIN:
name = "copyin";
goto print_remap;
case OMP_CLAUSE_COPYPRIVATE:
name = "copyprivate";
goto print_remap;
case OMP_CLAUSE_UNIFORM:
name = "uniform";
goto print_remap;
case OMP_CLAUSE_USE_DEVICE_PTR:
name = "use_device_ptr";
if (OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT (clause))
modifier = "if_present:";
goto print_remap;
case OMP_CLAUSE_USE_DEVICE_ADDR:
name = "use_device_addr";
goto print_remap;
case OMP_CLAUSE_IS_DEVICE_PTR:
name = "is_device_ptr";
goto print_remap;
case OMP_CLAUSE_INCLUSIVE:
name = "inclusive";
goto print_remap;
case OMP_CLAUSE_EXCLUSIVE:
name = "exclusive";
goto print_remap;
case OMP_CLAUSE__LOOPTEMP_:
name = "_looptemp_";
goto print_remap;
case OMP_CLAUSE__REDUCTEMP_:
name = "_reductemp_";
goto print_remap;
case OMP_CLAUSE__CONDTEMP_:
name = "_condtemp_";
goto print_remap;
case OMP_CLAUSE__SCANTEMP_:
name = "_scantemp_";
goto print_remap;
case OMP_CLAUSE_TO_DECLARE:
name = "to";
goto print_remap;
case OMP_CLAUSE_LINK:
name = "link";
goto print_remap;
case OMP_CLAUSE_NONTEMPORAL:
name = "nontemporal";
goto print_remap;
print_remap:
pp_string (pp, name);
pp_left_paren (pp);
if (modifier)
pp_string (pp, modifier);
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_TASK_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
pp_string (pp, OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_IN_REDUCTION
? "in_" : "task_");
/* FALLTHRU */
case OMP_CLAUSE_REDUCTION:
pp_string (pp, "reduction(");
if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_REDUCTION)
{
if (OMP_CLAUSE_REDUCTION_TASK (clause))
pp_string (pp, "task,");
else if (OMP_CLAUSE_REDUCTION_INSCAN (clause))
pp_string (pp, "inscan,");
}
if (OMP_CLAUSE_REDUCTION_CODE (clause) != ERROR_MARK)
{
pp_string (pp,
op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause)));
pp_colon (pp);
}
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_IF:
pp_string (pp, "if(");
switch (OMP_CLAUSE_IF_MODIFIER (clause))
{
case ERROR_MARK: break;
case VOID_CST: pp_string (pp, "cancel:"); break;
case OMP_PARALLEL: pp_string (pp, "parallel:"); break;
case OMP_SIMD: pp_string (pp, "simd:"); break;
case OMP_TASK: pp_string (pp, "task:"); break;
case OMP_TASKLOOP: pp_string (pp, "taskloop:"); break;
case OMP_TARGET_DATA: pp_string (pp, "target data:"); break;
case OMP_TARGET: pp_string (pp, "target:"); break;
case OMP_TARGET_UPDATE: pp_string (pp, "target update:"); break;
case OMP_TARGET_ENTER_DATA:
pp_string (pp, "target enter data:"); break;
case OMP_TARGET_EXIT_DATA: pp_string (pp, "target exit data:"); break;
default: gcc_unreachable ();
}
dump_generic_node (pp, OMP_CLAUSE_IF_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_NUM_THREADS:
pp_string (pp, "num_threads(");
dump_generic_node (pp, OMP_CLAUSE_NUM_THREADS_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_NOWAIT:
pp_string (pp, "nowait");
break;
case OMP_CLAUSE_ORDERED:
pp_string (pp, "ordered");
if (OMP_CLAUSE_ORDERED_EXPR (clause))
{
pp_left_paren (pp);
dump_generic_node (pp, OMP_CLAUSE_ORDERED_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
}
break;
case OMP_CLAUSE_DEFAULT:
pp_string (pp, "default(");
switch (OMP_CLAUSE_DEFAULT_KIND (clause))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
pp_string (pp, "shared");
break;
case OMP_CLAUSE_DEFAULT_NONE:
pp_string (pp, "none");
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
pp_string (pp, "private");
break;
case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
pp_string (pp, "firstprivate");
break;
case OMP_CLAUSE_DEFAULT_PRESENT:
pp_string (pp, "present");
break;
default:
gcc_unreachable ();
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_SCHEDULE:
pp_string (pp, "schedule(");
if (OMP_CLAUSE_SCHEDULE_KIND (clause)
& (OMP_CLAUSE_SCHEDULE_MONOTONIC
| OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
{
if (OMP_CLAUSE_SCHEDULE_KIND (clause)
& OMP_CLAUSE_SCHEDULE_MONOTONIC)
pp_string (pp, "monotonic");
else
pp_string (pp, "nonmonotonic");
if (OMP_CLAUSE_SCHEDULE_SIMD (clause))
pp_comma (pp);
else
pp_colon (pp);
}
if (OMP_CLAUSE_SCHEDULE_SIMD (clause))
pp_string (pp, "simd:");
switch (OMP_CLAUSE_SCHEDULE_KIND (clause) & OMP_CLAUSE_SCHEDULE_MASK)
{
case OMP_CLAUSE_SCHEDULE_STATIC:
pp_string (pp, "static");
break;
case OMP_CLAUSE_SCHEDULE_DYNAMIC:
pp_string (pp, "dynamic");
break;
case OMP_CLAUSE_SCHEDULE_GUIDED:
pp_string (pp, "guided");
break;
case OMP_CLAUSE_SCHEDULE_RUNTIME:
pp_string (pp, "runtime");
break;
case OMP_CLAUSE_SCHEDULE_AUTO:
pp_string (pp, "auto");
break;
default:
gcc_unreachable ();
}
if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause))
{
pp_comma (pp);
dump_generic_node (pp, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause),
spc, flags, false);
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_UNTIED:
pp_string (pp, "untied");
break;
case OMP_CLAUSE_COLLAPSE:
pp_string (pp, "collapse(");
dump_generic_node (pp, OMP_CLAUSE_COLLAPSE_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_FINAL:
pp_string (pp, "final(");
dump_generic_node (pp, OMP_CLAUSE_FINAL_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_MERGEABLE:
pp_string (pp, "mergeable");
break;
case OMP_CLAUSE_LINEAR:
pp_string (pp, "linear(");
switch (OMP_CLAUSE_LINEAR_KIND (clause))
{
case OMP_CLAUSE_LINEAR_DEFAULT:
break;
case OMP_CLAUSE_LINEAR_REF:
pp_string (pp, "ref(");
break;
case OMP_CLAUSE_LINEAR_VAL:
pp_string (pp, "val(");
break;
case OMP_CLAUSE_LINEAR_UVAL:
pp_string (pp, "uval(");
break;
default:
gcc_unreachable ();
}
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
if (OMP_CLAUSE_LINEAR_KIND (clause) != OMP_CLAUSE_LINEAR_DEFAULT)
pp_right_paren (pp);
pp_colon (pp);
dump_generic_node (pp, OMP_CLAUSE_LINEAR_STEP (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_ALIGNED:
pp_string (pp, "aligned(");
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
{
pp_colon (pp);
dump_generic_node (pp, OMP_CLAUSE_ALIGNED_ALIGNMENT (clause),
spc, flags, false);
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_DEPEND:
pp_string (pp, "depend(");
switch (OMP_CLAUSE_DEPEND_KIND (clause))
{
case OMP_CLAUSE_DEPEND_DEPOBJ:
name = "depobj";
break;
case OMP_CLAUSE_DEPEND_IN:
name = "in";
break;
case OMP_CLAUSE_DEPEND_OUT:
name = "out";
break;
case OMP_CLAUSE_DEPEND_INOUT:
name = "inout";
break;
case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
name = "mutexinoutset";
break;
case OMP_CLAUSE_DEPEND_SOURCE:
pp_string (pp, "source)");
return;
case OMP_CLAUSE_DEPEND_LAST:
name = "__internal__";
break;
case OMP_CLAUSE_DEPEND_SINK:
pp_string (pp, "sink:");
for (tree t = OMP_CLAUSE_DECL (clause); t; t = TREE_CHAIN (t))
if (TREE_CODE (t) == TREE_LIST)
{
dump_generic_node (pp, TREE_VALUE (t), spc, flags, false);
if (TREE_PURPOSE (t) != integer_zero_node)
{
if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (t))
pp_minus (pp);
else
pp_plus (pp);
dump_generic_node (pp, TREE_PURPOSE (t), spc, flags,
false);
}
if (TREE_CHAIN (t))
pp_comma (pp);
}
else
gcc_unreachable ();
pp_right_paren (pp);
return;
default:
gcc_unreachable ();
}
{
tree t = OMP_CLAUSE_DECL (clause);
if (TREE_CODE (t) == TREE_LIST
&& TREE_PURPOSE (t)
&& TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
{
dump_omp_iterators (pp, TREE_PURPOSE (t), spc, flags);
pp_colon (pp);
t = TREE_VALUE (t);
}
pp_string (pp, name);
pp_colon (pp);
dump_generic_node (pp, t, spc, flags, false);
pp_right_paren (pp);
}
break;
case OMP_CLAUSE_MAP:
pp_string (pp, "map(");
switch (OMP_CLAUSE_MAP_KIND (clause))
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_POINTER:
pp_string (pp, "alloc");
break;
case GOMP_MAP_IF_PRESENT:
pp_string (pp, "no_alloc");
break;
case GOMP_MAP_TO:
case GOMP_MAP_TO_PSET:
pp_string (pp, "to");
break;
case GOMP_MAP_FROM:
pp_string (pp, "from");
break;
case GOMP_MAP_TOFROM:
pp_string (pp, "tofrom");
break;
case GOMP_MAP_FORCE_ALLOC:
pp_string (pp, "force_alloc");
break;
case GOMP_MAP_FORCE_TO:
pp_string (pp, "force_to");
break;
case GOMP_MAP_FORCE_FROM:
pp_string (pp, "force_from");
break;
case GOMP_MAP_FORCE_TOFROM:
pp_string (pp, "force_tofrom");
break;
case GOMP_MAP_FORCE_PRESENT:
pp_string (pp, "force_present");
break;
case GOMP_MAP_DELETE:
pp_string (pp, "delete");
break;
case GOMP_MAP_FORCE_DEVICEPTR:
pp_string (pp, "force_deviceptr");
break;
case GOMP_MAP_ALWAYS_TO:
pp_string (pp, "always,to");
break;
case GOMP_MAP_ALWAYS_FROM:
pp_string (pp, "always,from");
break;
case GOMP_MAP_ALWAYS_TOFROM:
pp_string (pp, "always,tofrom");
break;
case GOMP_MAP_RELEASE:
pp_string (pp, "release");
break;
case GOMP_MAP_FIRSTPRIVATE_POINTER:
pp_string (pp, "firstprivate");
break;
case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
pp_string (pp, "firstprivate ref");
break;
case GOMP_MAP_STRUCT:
pp_string (pp, "struct");
break;
case GOMP_MAP_ALWAYS_POINTER:
pp_string (pp, "always_pointer");
break;
case GOMP_MAP_DEVICE_RESIDENT:
pp_string (pp, "device_resident");
break;
case GOMP_MAP_LINK:
pp_string (pp, "link");
break;
case GOMP_MAP_ATTACH:
pp_string (pp, "attach");
break;
case GOMP_MAP_DETACH:
pp_string (pp, "detach");
break;
case GOMP_MAP_FORCE_DETACH:
pp_string (pp, "force_detach");
break;
case GOMP_MAP_ATTACH_DETACH:
pp_string (pp, "attach_detach");
break;
default:
gcc_unreachable ();
}
pp_colon (pp);
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
print_clause_size:
if (OMP_CLAUSE_SIZE (clause))
{
switch (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP
? OMP_CLAUSE_MAP_KIND (clause) : GOMP_MAP_TO)
{
case GOMP_MAP_POINTER:
case GOMP_MAP_FIRSTPRIVATE_POINTER:
case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
case GOMP_MAP_ALWAYS_POINTER:
pp_string (pp, " [pointer assign, bias: ");
break;
case GOMP_MAP_TO_PSET:
pp_string (pp, " [pointer set, len: ");
break;
case GOMP_MAP_ATTACH:
case GOMP_MAP_DETACH:
case GOMP_MAP_FORCE_DETACH:
case GOMP_MAP_ATTACH_DETACH:
pp_string (pp, " [bias: ");
break;
default:
pp_string (pp, " [len: ");
break;
}
dump_generic_node (pp, OMP_CLAUSE_SIZE (clause),
spc, flags, false);
pp_right_bracket (pp);
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_FROM:
pp_string (pp, "from(");
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
goto print_clause_size;
case OMP_CLAUSE_TO:
pp_string (pp, "to(");
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
goto print_clause_size;
case OMP_CLAUSE__CACHE_:
pp_string (pp, "(");
dump_generic_node (pp, OMP_CLAUSE_DECL (clause),
spc, flags, false);
goto print_clause_size;
case OMP_CLAUSE_NUM_TEAMS:
pp_string (pp, "num_teams(");
dump_generic_node (pp, OMP_CLAUSE_NUM_TEAMS_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_THREAD_LIMIT:
pp_string (pp, "thread_limit(");
dump_generic_node (pp, OMP_CLAUSE_THREAD_LIMIT_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_DEVICE:
pp_string (pp, "device(");
dump_generic_node (pp, OMP_CLAUSE_DEVICE_ID (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_DIST_SCHEDULE:
pp_string (pp, "dist_schedule(static");
if (OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (clause))
{
pp_comma (pp);
dump_generic_node (pp,
OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (clause),
spc, flags, false);
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_PROC_BIND:
pp_string (pp, "proc_bind(");
switch (OMP_CLAUSE_PROC_BIND_KIND (clause))
{
case OMP_CLAUSE_PROC_BIND_MASTER:
pp_string (pp, "master");
break;
case OMP_CLAUSE_PROC_BIND_CLOSE:
pp_string (pp, "close");
break;
case OMP_CLAUSE_PROC_BIND_SPREAD:
pp_string (pp, "spread");
break;
default:
gcc_unreachable ();
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_DEVICE_TYPE:
pp_string (pp, "device_type(");
switch (OMP_CLAUSE_DEVICE_TYPE_KIND (clause))
{
case OMP_CLAUSE_DEVICE_TYPE_HOST:
pp_string (pp, "host");
break;
case OMP_CLAUSE_DEVICE_TYPE_NOHOST:
pp_string (pp, "nohost");
break;
case OMP_CLAUSE_DEVICE_TYPE_ANY:
pp_string (pp, "any");
break;
default:
gcc_unreachable ();
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_SAFELEN:
pp_string (pp, "safelen(");
dump_generic_node (pp, OMP_CLAUSE_SAFELEN_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_SIMDLEN:
pp_string (pp, "simdlen(");
dump_generic_node (pp, OMP_CLAUSE_SIMDLEN_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_PRIORITY:
pp_string (pp, "priority(");
dump_generic_node (pp, OMP_CLAUSE_PRIORITY_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_GRAINSIZE:
pp_string (pp, "grainsize(");
dump_generic_node (pp, OMP_CLAUSE_GRAINSIZE_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_NUM_TASKS:
pp_string (pp, "num_tasks(");
dump_generic_node (pp, OMP_CLAUSE_NUM_TASKS_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_HINT:
pp_string (pp, "hint(");
dump_generic_node (pp, OMP_CLAUSE_HINT_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_DEFAULTMAP:
pp_string (pp, "defaultmap(");
switch (OMP_CLAUSE_DEFAULTMAP_BEHAVIOR (clause))
{
case OMP_CLAUSE_DEFAULTMAP_ALLOC:
pp_string (pp, "alloc");
break;
case OMP_CLAUSE_DEFAULTMAP_TO:
pp_string (pp, "to");
break;
case OMP_CLAUSE_DEFAULTMAP_FROM:
pp_string (pp, "from");
break;
case OMP_CLAUSE_DEFAULTMAP_TOFROM:
pp_string (pp, "tofrom");
break;
case OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE:
pp_string (pp, "firstprivate");
break;
case OMP_CLAUSE_DEFAULTMAP_NONE:
pp_string (pp, "none");
break;
case OMP_CLAUSE_DEFAULTMAP_DEFAULT:
pp_string (pp, "default");
break;
default:
gcc_unreachable ();
}
switch (OMP_CLAUSE_DEFAULTMAP_CATEGORY (clause))
{
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR:
pp_string (pp, ":scalar");
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE:
pp_string (pp, ":aggregate");
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_ALLOCATABLE:
pp_string (pp, ":allocatable");
break;
case OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER:
pp_string (pp, ":pointer");
break;
default:
gcc_unreachable ();
}
pp_right_paren (pp);
break;
case OMP_CLAUSE_ORDER:
pp_string (pp, "order(concurrent)");
break;
case OMP_CLAUSE_BIND:
pp_string (pp, "bind(");
switch (OMP_CLAUSE_BIND_KIND (clause))
{
case OMP_CLAUSE_BIND_TEAMS:
pp_string (pp, "teams");
break;
case OMP_CLAUSE_BIND_PARALLEL:
pp_string (pp, "parallel");
break;
case OMP_CLAUSE_BIND_THREAD:
pp_string (pp, "thread");
break;
default:
gcc_unreachable ();
}
pp_right_paren (pp);
break;
case OMP_CLAUSE__SIMDUID_:
pp_string (pp, "_simduid_(");
dump_generic_node (pp, OMP_CLAUSE__SIMDUID__DECL (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE__SIMT_:
pp_string (pp, "_simt_");
break;
case OMP_CLAUSE_GANG:
pp_string (pp, "gang");
if (OMP_CLAUSE_GANG_EXPR (clause) != NULL_TREE)
{
pp_string (pp, "(num: ");
dump_generic_node (pp, OMP_CLAUSE_GANG_EXPR (clause),
spc, flags, false);
}
if (OMP_CLAUSE_GANG_STATIC_EXPR (clause) != NULL_TREE)
{
if (OMP_CLAUSE_GANG_EXPR (clause) == NULL_TREE)
pp_left_paren (pp);
else
pp_space (pp);
pp_string (pp, "static:");
if (OMP_CLAUSE_GANG_STATIC_EXPR (clause)
== integer_minus_one_node)
pp_character (pp, '*');
else
dump_generic_node (pp, OMP_CLAUSE_GANG_STATIC_EXPR (clause),
spc, flags, false);
}
if (OMP_CLAUSE_GANG_EXPR (clause) != NULL_TREE
|| OMP_CLAUSE_GANG_STATIC_EXPR (clause) != NULL_TREE)
pp_right_paren (pp);
break;
case OMP_CLAUSE_ASYNC:
pp_string (pp, "async");
if (OMP_CLAUSE_ASYNC_EXPR (clause))
{
pp_character(pp, '(');
dump_generic_node (pp, OMP_CLAUSE_ASYNC_EXPR (clause),
spc, flags, false);
pp_character(pp, ')');
}
break;
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
pp_string (pp, omp_clause_code_name[OMP_CLAUSE_CODE (clause)]);
break;
case OMP_CLAUSE_WAIT:
pp_string (pp, "wait(");
dump_generic_node (pp, OMP_CLAUSE_WAIT_EXPR (clause),
spc, flags, false);
pp_character(pp, ')');
break;
case OMP_CLAUSE_WORKER:
pp_string (pp, "worker");
if (OMP_CLAUSE_WORKER_EXPR (clause) != NULL_TREE)
{
pp_left_paren (pp);
dump_generic_node (pp, OMP_CLAUSE_WORKER_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
}
break;
case OMP_CLAUSE_VECTOR:
pp_string (pp, "vector");
if (OMP_CLAUSE_VECTOR_EXPR (clause) != NULL_TREE)
{
pp_left_paren (pp);
dump_generic_node (pp, OMP_CLAUSE_VECTOR_EXPR (clause),
spc, flags, false);
pp_right_paren (pp);
}
break;
case OMP_CLAUSE_NUM_GANGS:
pp_string (pp, "num_gangs(");
dump_generic_node (pp, OMP_CLAUSE_NUM_GANGS_EXPR (clause),
spc, flags, false);
pp_character (pp, ')');
break;
case OMP_CLAUSE_NUM_WORKERS:
pp_string (pp, "num_workers(");
dump_generic_node (pp, OMP_CLAUSE_NUM_WORKERS_EXPR (clause),
spc, flags, false);
pp_character (pp, ')');
break;
case OMP_CLAUSE_VECTOR_LENGTH:
pp_string (pp, "vector_length(");
dump_generic_node (pp, OMP_CLAUSE_VECTOR_LENGTH_EXPR (clause),
spc, flags, false);
pp_character (pp, ')');
break;
case OMP_CLAUSE_INBRANCH:
pp_string (pp, "inbranch");
break;
case OMP_CLAUSE_NOTINBRANCH:
pp_string (pp, "notinbranch");
break;
case OMP_CLAUSE_FOR:
pp_string (pp, "for");
break;
case OMP_CLAUSE_PARALLEL:
pp_string (pp, "parallel");
break;
case OMP_CLAUSE_SECTIONS:
pp_string (pp, "sections");
break;
case OMP_CLAUSE_TASKGROUP:
pp_string (pp, "taskgroup");
break;
case OMP_CLAUSE_NOGROUP:
pp_string (pp, "nogroup");
break;
case OMP_CLAUSE_THREADS:
pp_string (pp, "threads");
break;
case OMP_CLAUSE_SIMD:
pp_string (pp, "simd");
break;
case OMP_CLAUSE_INDEPENDENT:
pp_string (pp, "independent");
break;
case OMP_CLAUSE_TILE:
pp_string (pp, "tile(");
dump_generic_node (pp, OMP_CLAUSE_TILE_LIST (clause),
spc, flags, false);
pp_right_paren (pp);
break;
case OMP_CLAUSE_IF_PRESENT:
pp_string (pp, "if_present");
break;
case OMP_CLAUSE_FINALIZE:
pp_string (pp, "finalize");
break;
default:
gcc_unreachable ();
}
}
/* Dump the list of OpenMP clauses. PP, SPC and FLAGS are as in
dump_generic_node. */
void
dump_omp_clauses (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
{
if (clause == NULL)
return;
pp_space (pp);
while (1)
{
dump_omp_clause (pp, clause, spc, flags);
clause = OMP_CLAUSE_CHAIN (clause);
if (clause == NULL)
return;
pp_space (pp);
}
}
/* Dump location LOC to PP. */
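/* (E.g. "[foo.c:12:3] "; the "file:" part is omitted when unknown.) */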
void
dump_location (pretty_printer *pp, location_t loc)
{
expanded_location xloc = expand_location (loc);
pp_left_bracket (pp);
if (xloc.file)
{
pp_string (pp, xloc.file);
pp_string (pp, ":");
}
pp_decimal_int (pp, xloc.line);
pp_colon (pp);
pp_decimal_int (pp, xloc.column);
pp_string (pp, "] ");
}
/* Dump lexical block BLOCK. PP, SPC and FLAGS are as in
dump_generic_node. */
static void
dump_block_node (pretty_printer *pp, tree block, int spc, dump_flags_t flags)
{
tree t;
pp_printf (pp, "BLOCK #%d ", BLOCK_NUMBER (block));
if (flags & TDF_ADDRESS)
pp_printf (pp, "[%p] ", (void *) block);
if (TREE_ASM_WRITTEN (block))
pp_string (pp, "[written] ");
if (flags & TDF_SLIM)
return;
if (BLOCK_SOURCE_LOCATION (block))
dump_location (pp, BLOCK_SOURCE_LOCATION (block));
newline_and_indent (pp, spc + 2);
if (BLOCK_SUPERCONTEXT (block))
{
pp_string (pp, "SUPERCONTEXT: ");
dump_generic_node (pp, BLOCK_SUPERCONTEXT (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (pp, spc + 2);
}
if (BLOCK_SUBBLOCKS (block))
{
pp_string (pp, "SUBBLOCKS: ");
for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
{
dump_generic_node (pp, t, 0, flags | TDF_SLIM, false);
pp_space (pp);
}
newline_and_indent (pp, spc + 2);
}
if (BLOCK_CHAIN (block))
{
pp_string (pp, "SIBLINGS: ");
for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t))
{
dump_generic_node (pp, t, 0, flags | TDF_SLIM, false);
pp_space (pp);
}
newline_and_indent (pp, spc + 2);
}
if (BLOCK_VARS (block))
{
pp_string (pp, "VARS: ");
for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
{
dump_generic_node (pp, t, 0, flags, false);
pp_space (pp);
}
newline_and_indent (pp, spc + 2);
}
if (vec_safe_length (BLOCK_NONLOCALIZED_VARS (block)) > 0)
{
unsigned i;
vec<tree, va_gc> *nlv = BLOCK_NONLOCALIZED_VARS (block);
pp_string (pp, "NONLOCALIZED_VARS: ");
FOR_EACH_VEC_ELT (*nlv, i, t)
{
dump_generic_node (pp, t, 0, flags, false);
pp_space (pp);
}
newline_and_indent (pp, spc + 2);
}
if (BLOCK_ABSTRACT_ORIGIN (block))
{
pp_string (pp, "ABSTRACT_ORIGIN: ");
dump_generic_node (pp, BLOCK_ABSTRACT_ORIGIN (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (pp, spc + 2);
}
if (BLOCK_FRAGMENT_ORIGIN (block))
{
pp_string (pp, "FRAGMENT_ORIGIN: ");
dump_generic_node (pp, BLOCK_FRAGMENT_ORIGIN (block), 0,
flags | TDF_SLIM, false);
newline_and_indent (pp, spc + 2);
}
if (BLOCK_FRAGMENT_CHAIN (block))
{
pp_string (pp, "FRAGMENT_CHAIN: ");
for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t))
{
dump_generic_node (pp, t, 0, flags | TDF_SLIM, false);
pp_space (pp);
}
newline_and_indent (pp, spc + 2);
}
}
/* Dump #pragma omp atomic memory order clause. */
void
dump_omp_atomic_memory_order (pretty_printer *pp, enum omp_memory_order mo)
{
switch (mo)
{
case OMP_MEMORY_ORDER_RELAXED:
pp_string (pp, " relaxed");
break;
case OMP_MEMORY_ORDER_SEQ_CST:
pp_string (pp, " seq_cst");
break;
case OMP_MEMORY_ORDER_ACQ_REL:
pp_string (pp, " acq_rel");
break;
case OMP_MEMORY_ORDER_ACQUIRE:
pp_string (pp, " acquire");
break;
case OMP_MEMORY_ORDER_RELEASE:
pp_string (pp, " release");
break;
case OMP_MEMORY_ORDER_UNSPECIFIED:
break;
default:
gcc_unreachable ();
}
}
/* Helper to dump a MEM_REF node. */
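/* Depending on FLAGS and the operands this produces either the GIMPLE FE
form "__MEM <int> ((int *)p + 4)", a plain C-style dereference such as
"*p", or the verbose "MEM[(int *)p + 4B]" form (with an optional "<type>"
when the access size and the pointed-to size differ). Examples are
illustrative, not verbatim dumper output. */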
static void
dump_mem_ref (pretty_printer *pp, tree node, int spc, dump_flags_t flags)
{
if (TREE_CODE (node) == MEM_REF && (flags & TDF_GIMPLE))
{
pp_string (pp, "__MEM <");
dump_generic_node (pp, TREE_TYPE (node),
spc, flags | TDF_SLIM, false);
if (TYPE_ALIGN (TREE_TYPE (node))
!= TYPE_ALIGN (TYPE_MAIN_VARIANT (TREE_TYPE (node))))
{
pp_string (pp, ", ");
pp_decimal_int (pp, TYPE_ALIGN (TREE_TYPE (node)));
}
pp_greater (pp);
pp_string (pp, " (");
if (TREE_TYPE (TREE_OPERAND (node, 0))
!= TREE_TYPE (TREE_OPERAND (node, 1)))
{
pp_left_paren (pp);
dump_generic_node (pp, TREE_TYPE (TREE_OPERAND (node, 1)),
spc, flags | TDF_SLIM, false);
pp_right_paren (pp);
}
dump_generic_node (pp, TREE_OPERAND (node, 0),
spc, flags | TDF_SLIM, false);
if (! integer_zerop (TREE_OPERAND (node, 1)))
{
pp_string (pp, " + ");
dump_generic_node (pp, TREE_OPERAND (node, 1),
spc, flags | TDF_SLIM, false);
}
pp_right_paren (pp);
}
else if (TREE_CODE (node) == MEM_REF
&& integer_zerop (TREE_OPERAND (node, 1))
/* Dump the types of INTEGER_CSTs explicitly, for we can't
infer them and MEM_ATTR caching will share MEM_REFs
with differently-typed op0s. */
&& TREE_CODE (TREE_OPERAND (node, 0)) != INTEGER_CST
/* Released SSA_NAMES have no TREE_TYPE. */
&& TREE_TYPE (TREE_OPERAND (node, 0)) != NULL_TREE
/* Same pointer types, but ignoring POINTER_TYPE vs.
REFERENCE_TYPE. */
&& (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 0)))
== TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1))))
&& (TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 0)))
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 1))))
&& (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 0)))
== TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 1))))
/* Same value types ignoring qualifiers. */
&& (TYPE_MAIN_VARIANT (TREE_TYPE (node))
== TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1)))))
&& (!(flags & TDF_ALIAS)
|| MR_DEPENDENCE_CLIQUE (node) == 0))
{
if (TREE_CODE (TREE_OPERAND (node, 0)) != ADDR_EXPR)
{
/* Enclose pointers to arrays in parentheses. */
tree op0 = TREE_OPERAND (node, 0);
tree op0type = TREE_TYPE (op0);
if (POINTER_TYPE_P (op0type)
&& TREE_CODE (TREE_TYPE (op0type)) == ARRAY_TYPE)
pp_left_paren (pp);
pp_star (pp);
dump_generic_node (pp, op0, spc, flags, false);
if (POINTER_TYPE_P (op0type)
&& TREE_CODE (TREE_TYPE (op0type)) == ARRAY_TYPE)
pp_right_paren (pp);
}
else
dump_generic_node (pp,
TREE_OPERAND (TREE_OPERAND (node, 0), 0),
spc, flags, false);
}
else
{
pp_string (pp, "MEM");
tree nodetype = TREE_TYPE (node);
tree op0 = TREE_OPERAND (node, 0);
tree op1 = TREE_OPERAND (node, 1);
tree op1type = TYPE_MAIN_VARIANT (TREE_TYPE (op1));
tree op0size = TYPE_SIZE (nodetype);
tree op1size = TYPE_SIZE (TREE_TYPE (op1type));
if (!op0size || !op1size
|| !operand_equal_p (op0size, op1size, 0))
{
pp_string (pp, " <");
/* If the size of the type of the operand is not the same
as the size of the MEM_REF expression include the type
of the latter similar to the TDF_GIMPLE output to make
it clear how many bytes of memory are being accessed. */
dump_generic_node (pp, nodetype, spc, flags | TDF_SLIM, false);
pp_string (pp, "> ");
}
pp_string (pp, "[(");
dump_generic_node (pp, op1type, spc, flags | TDF_SLIM, false);
pp_right_paren (pp);
dump_generic_node (pp, op0, spc, flags, false);
if (!integer_zerop (op1))
{
pp_string (pp, " + ");
dump_generic_node (pp, op1, spc, flags, false);
}
if (TREE_CODE (node) == TARGET_MEM_REF)
{
tree tmp = TMR_INDEX2 (node);
if (tmp)
{
pp_string (pp, " + ");
dump_generic_node (pp, tmp, spc, flags, false);
}
tmp = TMR_INDEX (node);
if (tmp)
{
pp_string (pp, " + ");
dump_generic_node (pp, tmp, spc, flags, false);
tmp = TMR_STEP (node);
pp_string (pp, " * ");
if (tmp)
dump_generic_node (pp, tmp, spc, flags, false);
else
pp_string (pp, "1");
}
}
if ((flags & TDF_ALIAS)
&& MR_DEPENDENCE_CLIQUE (node) != 0)
{
pp_string (pp, " clique ");
pp_unsigned_wide_integer (pp, MR_DEPENDENCE_CLIQUE (node));
pp_string (pp, " base ");
pp_unsigned_wide_integer (pp, MR_DEPENDENCE_BASE (node));
}
pp_right_bracket (pp);
}
}
/* Helper function for dump_generic_node. Dump INIT or COND expression for
OpenMP loop non-rectangular iterators. */
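/* The three TREE_VEC elements are printed as "v0 * v1 + v2", with v1 and
v2 parenthesized when their operator priority requires it. */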
void
dump_omp_loop_non_rect_expr (pretty_printer *pp, tree node, int spc,
dump_flags_t flags)
{
gcc_assert (TREE_CODE (node) == TREE_VEC);
dump_generic_node (pp, TREE_VEC_ELT (node, 0), spc, flags, false);
pp_string (pp, " * ");
if (op_prio (TREE_VEC_ELT (node, 1)) <= op_code_prio (MULT_EXPR))
{
pp_left_paren (pp);
dump_generic_node (pp, TREE_VEC_ELT (node, 1), spc, flags, false);
pp_right_paren (pp);
}
else
dump_generic_node (pp, TREE_VEC_ELT (node, 1), spc, flags, false);
pp_string (pp, " + ");
if (op_prio (TREE_VEC_ELT (node, 2)) <= op_code_prio (PLUS_EXPR))
{
pp_left_paren (pp);
dump_generic_node (pp, TREE_VEC_ELT (node, 2), spc, flags, false);
pp_right_paren (pp);
}
else
dump_generic_node (pp, TREE_VEC_ELT (node, 2), spc, flags, false);
}
/* Dump the node NODE on the pretty_printer PP, SPC spaces of
indent. FLAGS specifies details to show in the dump (see TDF_* in
dumpfile.h). If IS_STMT is true, the object printed is considered
to be a statement and it is terminated by ';' if appropriate. */
int
dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
bool is_stmt)
{
tree type;
tree op0, op1;
const char *str;
bool is_expr;
enum tree_code code;
if (node == NULL_TREE)
return spc;
is_expr = EXPR_P (node);
if (is_stmt && (flags & TDF_STMTADDR))
pp_printf (pp, "<&%p> ", (void *)node);
if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node))
dump_location (pp, EXPR_LOCATION (node));
code = TREE_CODE (node);
switch (code)
{
case ERROR_MARK:
pp_string (pp, "<<< error >>>");
break;
case IDENTIFIER_NODE:
pp_tree_identifier (pp, node);
break;
case TREE_LIST:
while (node && node != error_mark_node)
{
if (TREE_PURPOSE (node))
{
dump_generic_node (pp, TREE_PURPOSE (node), spc, flags, false);
pp_space (pp);
}
dump_generic_node (pp, TREE_VALUE (node), spc, flags, false);
node = TREE_CHAIN (node);
if (node && TREE_CODE (node) == TREE_LIST)
{
pp_comma (pp);
pp_space (pp);
}
}
break;
case TREE_BINFO:
dump_generic_node (pp, BINFO_TYPE (node), spc, flags, false);
break;
case TREE_VEC:
{
size_t i;
if (TREE_VEC_LENGTH (node) > 0)
{
size_t len = TREE_VEC_LENGTH (node);
for (i = 0; i < len - 1; i++)
{
dump_generic_node (pp, TREE_VEC_ELT (node, i), spc, flags,
false);
pp_comma (pp);
pp_space (pp);
}
dump_generic_node (pp, TREE_VEC_ELT (node, len - 1), spc,
flags, false);
}
}
break;
case VOID_TYPE:
case INTEGER_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
case COMPLEX_TYPE:
case VECTOR_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
enum tree_code_class tclass;
if (quals & TYPE_QUAL_ATOMIC)
pp_string (pp, "atomic ");
if (quals & TYPE_QUAL_CONST)
pp_string (pp, "const ");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (pp, "volatile ");
if (quals & TYPE_QUAL_RESTRICT)
pp_string (pp, "restrict ");
if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node)))
{
pp_string (pp, "<address-space-");
pp_decimal_int (pp, TYPE_ADDR_SPACE (node));
pp_string (pp, "> ");
}
tclass = TREE_CODE_CLASS (TREE_CODE (node));
if (tclass == tcc_declaration)
{
if (DECL_NAME (node))
dump_decl_name (pp, node, flags);
else
pp_string (pp, "<unnamed type decl>");
}
else if (tclass == tcc_type)
{
if (TYPE_NAME (node))
{
if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE)
pp_tree_identifier (pp, TYPE_NAME (node));
else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL
&& DECL_NAME (TYPE_NAME (node)))
dump_decl_name (pp, TYPE_NAME (node), flags);
else
pp_string (pp, "<unnamed type>");
}
else if (TREE_CODE (node) == VECTOR_TYPE)
{
pp_string (pp, "vector");
pp_left_paren (pp);
pp_wide_integer (pp, TYPE_VECTOR_SUBPARTS (node));
pp_string (pp, ") ");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
}
else if (TREE_CODE (node) == INTEGER_TYPE)
{
if (TYPE_PRECISION (node) == CHAR_TYPE_SIZE)
pp_string (pp, (TYPE_UNSIGNED (node)
? "unsigned char"
: "signed char"));
else if (TYPE_PRECISION (node) == SHORT_TYPE_SIZE)
pp_string (pp, (TYPE_UNSIGNED (node)
? "unsigned short"
: "signed short"));
else if (TYPE_PRECISION (node) == INT_TYPE_SIZE)
pp_string (pp, (TYPE_UNSIGNED (node)
? "unsigned int"
: "signed int"));
else if (TYPE_PRECISION (node) == LONG_TYPE_SIZE)
pp_string (pp, (TYPE_UNSIGNED (node)
? "unsigned long"
: "signed long"));
else if (TYPE_PRECISION (node) == LONG_LONG_TYPE_SIZE)
pp_string (pp, (TYPE_UNSIGNED (node)
? "unsigned long long"
: "signed long long"));
else if (TYPE_PRECISION (node) >= CHAR_TYPE_SIZE
&& pow2p_hwi (TYPE_PRECISION (node)))
{
pp_string (pp, (TYPE_UNSIGNED (node) ? "uint" : "int"));
pp_decimal_int (pp, TYPE_PRECISION (node));
pp_string (pp, "_t");
}
else
{
pp_string (pp, (TYPE_UNSIGNED (node)
? "<unnamed-unsigned:"
: "<unnamed-signed:"));
pp_decimal_int (pp, TYPE_PRECISION (node));
pp_greater (pp);
}
}
else if (TREE_CODE (node) == COMPLEX_TYPE)
{
pp_string (pp, "__complex__ ");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
}
else if (TREE_CODE (node) == REAL_TYPE)
{
pp_string (pp, "<float:");
pp_decimal_int (pp, TYPE_PRECISION (node));
pp_greater (pp);
}
else if (TREE_CODE (node) == FIXED_POINT_TYPE)
{
pp_string (pp, "<fixed-point-");
pp_string (pp, TYPE_SATURATING (node) ? "sat:" : "nonsat:");
pp_decimal_int (pp, TYPE_PRECISION (node));
pp_greater (pp);
}
else if (TREE_CODE (node) == BOOLEAN_TYPE)
{
pp_string (pp, (TYPE_UNSIGNED (node)
? "<unsigned-boolean:"
: "<signed-boolean:"));
pp_decimal_int (pp, TYPE_PRECISION (node));
pp_greater (pp);
}
else if (TREE_CODE (node) == VOID_TYPE)
pp_string (pp, "void");
else
pp_string (pp, "<unnamed type>");
}
break;
}
case POINTER_TYPE:
case REFERENCE_TYPE:
str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&");
if (TREE_TYPE (node) == NULL)
{
pp_string (pp, str);
pp_string (pp, "<null type>");
}
else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE)
{
tree fnode = TREE_TYPE (node);
dump_generic_node (pp, TREE_TYPE (fnode), spc, flags, false);
pp_space (pp);
pp_left_paren (pp);
pp_string (pp, str);
if (TYPE_IDENTIFIER (node))
dump_generic_node (pp, TYPE_NAME (node), spc, flags, false);
else if (flags & TDF_NOUID)
pp_printf (pp, "<Txxxx>");
else
pp_printf (pp, "<T%x>", TYPE_UID (node));
pp_right_paren (pp);
dump_function_declaration (pp, fnode, spc, flags);
}
else
{
unsigned int quals = TYPE_QUALS (node);
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_space (pp);
pp_string (pp, str);
if (quals & TYPE_QUAL_CONST)
pp_string (pp, " const");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (pp, " volatile");
if (quals & TYPE_QUAL_RESTRICT)
pp_string (pp, " restrict");
if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node)))
{
pp_string (pp, " <address-space-");
pp_decimal_int (pp, TYPE_ADDR_SPACE (node));
pp_greater (pp);
}
if (TYPE_REF_CAN_ALIAS_ALL (node))
pp_string (pp, " {ref-all}");
}
break;
case OFFSET_TYPE:
NIY;
break;
case MEM_REF:
case TARGET_MEM_REF:
dump_mem_ref (pp, node, spc, flags);
break;
case ARRAY_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
tree tmp;
if (quals & TYPE_QUAL_ATOMIC)
pp_string (pp, "atomic ");
if (quals & TYPE_QUAL_CONST)
pp_string (pp, "const ");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (pp, "volatile ");
/* Print the innermost component type. */
for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE;
tmp = TREE_TYPE (tmp))
;
dump_generic_node (pp, tmp, spc, flags, false);
/* Print the dimensions. */
for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp))
dump_array_domain (pp, TYPE_DOMAIN (tmp), spc, flags);
break;
}
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
unsigned int quals = TYPE_QUALS (node);
if (quals & TYPE_QUAL_ATOMIC)
pp_string (pp, "atomic ");
if (quals & TYPE_QUAL_CONST)
pp_string (pp, "const ");
if (quals & TYPE_QUAL_VOLATILE)
pp_string (pp, "volatile ");
/* Print the name of the structure. */
if (TREE_CODE (node) == RECORD_TYPE)
pp_string (pp, "struct ");
else if (TREE_CODE (node) == UNION_TYPE)
pp_string (pp, "union ");
if (TYPE_NAME (node))
dump_generic_node (pp, TYPE_NAME (node), spc, flags, false);
else if (!(flags & TDF_SLIM))
/* FIXME: If we eliminate the 'else' above and attempt
to show the fields for named types, we may get stuck
following a cycle of pointers to structs. The alleged
self-reference check in print_struct_decl will not detect
cycles involving more than one pointer or struct type. */
print_struct_decl (pp, node, spc, flags);
break;
}
case LANG_TYPE:
NIY;
break;
case INTEGER_CST:
if (flags & TDF_GIMPLE
&& (POINTER_TYPE_P (TREE_TYPE (node))
|| (TYPE_PRECISION (TREE_TYPE (node))
< TYPE_PRECISION (integer_type_node))
|| exact_log2 (TYPE_PRECISION (TREE_TYPE (node))) == -1
|| tree_int_cst_sgn (node) < 0))
{
pp_string (pp, "_Literal (");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_string (pp, ") ");
}
if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE
&& ! (flags & TDF_GIMPLE))
{
/* In the case of a pointer, one may want to divide by the
size of the pointed-to type. Unfortunately, this is not
straightforward. The C front-end maps expressions
(int *) 5
int *p; (p + 5)
in such a way that the two INTEGER_CST nodes for "5" have
different values but identical types. In the latter
case, the 5 is multiplied by sizeof (int) in c-common.c
(pointer_int_sum) to convert it to a byte address, and
yet the type of the node is left unchanged. Argh. What
is consistent though is that the number value corresponds
to bytes (UNITS) offset.
NB: Neither of the following divisors can be trivially
used to recover the original literal:
TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node)))
TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */
pp_wide_integer (pp, TREE_INT_CST_LOW (node));
pp_string (pp, "B"); /* pseudo-unit */
}
else if (tree_fits_shwi_p (node))
pp_wide_integer (pp, tree_to_shwi (node));
else if (tree_fits_uhwi_p (node))
pp_unsigned_wide_integer (pp, tree_to_uhwi (node));
else
{
wide_int val = wi::to_wide (node);
if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node))))
{
pp_minus (pp);
val = -val;
}
print_hex (val, pp_buffer (pp)->digit_buffer);
pp_string (pp, pp_buffer (pp)->digit_buffer);
}
if ((flags & TDF_GIMPLE)
&& ! (POINTER_TYPE_P (TREE_TYPE (node))
|| (TYPE_PRECISION (TREE_TYPE (node))
< TYPE_PRECISION (integer_type_node))
|| exact_log2 (TYPE_PRECISION (TREE_TYPE (node))) == -1))
{
if (TYPE_UNSIGNED (TREE_TYPE (node)))
pp_character (pp, 'u');
if (TYPE_PRECISION (TREE_TYPE (node))
== TYPE_PRECISION (unsigned_type_node))
;
else if (TYPE_PRECISION (TREE_TYPE (node))
== TYPE_PRECISION (long_unsigned_type_node))
pp_character (pp, 'l');
else if (TYPE_PRECISION (TREE_TYPE (node))
== TYPE_PRECISION (long_long_unsigned_type_node))
pp_string (pp, "ll");
}
if (TREE_OVERFLOW (node))
pp_string (pp, "(OVF)");
break;
case POLY_INT_CST:
pp_string (pp, "POLY_INT_CST [");
dump_generic_node (pp, POLY_INT_CST_COEFF (node, 0), spc, flags, false);
for (unsigned int i = 1; i < NUM_POLY_INT_COEFFS; ++i)
{
pp_string (pp, ", ");
dump_generic_node (pp, POLY_INT_CST_COEFF (node, i),
spc, flags, false);
}
pp_string (pp, "]");
break;
case REAL_CST:
/* Code copied from print_node. */
{
REAL_VALUE_TYPE d;
if (TREE_OVERFLOW (node))
pp_string (pp, " overflow");
d = TREE_REAL_CST (node);
if (REAL_VALUE_ISINF (d))
pp_string (pp, REAL_VALUE_NEGATIVE (d) ? " -Inf" : " Inf");
else if (REAL_VALUE_ISNAN (d))
pp_string (pp, " Nan");
else
{
char string[100];
real_to_decimal (string, &d, sizeof (string), 0, 1);
pp_string (pp, string);
}
break;
}
case FIXED_CST:
{
char string[100];
fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string));
pp_string (pp, string);
break;
}
case COMPLEX_CST:
pp_string (pp, "__complex__ (");
dump_generic_node (pp, TREE_REALPART (node), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_IMAGPART (node), spc, flags, false);
pp_right_paren (pp);
break;
case STRING_CST:
{
pp_string (pp, "\"");
if (unsigned nbytes = TREE_STRING_LENGTH (node))
pretty_print_string (pp, TREE_STRING_POINTER (node), nbytes);
pp_string (pp, "\"");
break;
}
case VECTOR_CST:
{
unsigned i;
if (flags & TDF_GIMPLE)
{
pp_string (pp, "_Literal (");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_string (pp, ") ");
}
pp_string (pp, "{ ");
unsigned HOST_WIDE_INT nunits;
if (!VECTOR_CST_NELTS (node).is_constant (&nunits))
nunits = vector_cst_encoded_nelts (node);
for (i = 0; i < nunits; ++i)
{
if (i != 0)
pp_string (pp, ", ");
dump_generic_node (pp, VECTOR_CST_ELT (node, i),
spc, flags, false);
}
if (!VECTOR_CST_NELTS (node).is_constant ())
pp_string (pp, ", ...");
pp_string (pp, " }");
}
break;
case FUNCTION_TYPE:
case METHOD_TYPE:
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_space (pp);
if (TREE_CODE (node) == METHOD_TYPE)
{
if (TYPE_METHOD_BASETYPE (node))
dump_generic_node (pp, TYPE_NAME (TYPE_METHOD_BASETYPE (node)),
spc, flags, false);
else
pp_string (pp, "<null method basetype>");
pp_colon_colon (pp);
}
if (TYPE_IDENTIFIER (node))
dump_generic_node (pp, TYPE_NAME (node), spc, flags, false);
else if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node)))
dump_decl_name (pp, TYPE_NAME (node), flags);
else if (flags & TDF_NOUID)
pp_printf (pp, "<Txxxx>");
else
pp_printf (pp, "<T%x>", TYPE_UID (node));
dump_function_declaration (pp, node, spc, flags);
break;
case FUNCTION_DECL:
case CONST_DECL:
dump_decl_name (pp, node, flags);
break;
case LABEL_DECL:
if (DECL_NAME (node))
dump_decl_name (pp, node, flags);
else if (LABEL_DECL_UID (node) != -1)
{
if (flags & TDF_GIMPLE)
pp_printf (pp, "L%d", (int) LABEL_DECL_UID (node));
else
pp_printf (pp, "<L%d>", (int) LABEL_DECL_UID (node));
}
else
{
if (flags & TDF_NOUID)
pp_string (pp, "<D.xxxx>");
else
{
if (flags & TDF_GIMPLE)
pp_printf (pp, "<D%u>", DECL_UID (node));
else
pp_printf (pp, "<D.%u>", DECL_UID (node));
}
}
break;
case TYPE_DECL:
if (DECL_IS_BUILTIN (node))
{
/* Don't print the declaration of built-in types. */
break;
}
if (DECL_NAME (node))
dump_decl_name (pp, node, flags);
else if (TYPE_NAME (TREE_TYPE (node)) != node)
{
pp_string (pp, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE
? "union " : "struct "));
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
}
else
pp_string (pp, "<anon>");
break;
case VAR_DECL:
case PARM_DECL:
case FIELD_DECL:
case DEBUG_EXPR_DECL:
case NAMESPACE_DECL:
case NAMELIST_DECL:
dump_decl_name (pp, node, flags);
break;
case RESULT_DECL:
pp_string (pp, "<retval>");
break;
case COMPONENT_REF:
op0 = TREE_OPERAND (node, 0);
str = ".";
if (op0
&& (TREE_CODE (op0) == INDIRECT_REF
|| (TREE_CODE (op0) == MEM_REF
&& TREE_CODE (TREE_OPERAND (op0, 0)) != ADDR_EXPR
&& integer_zerop (TREE_OPERAND (op0, 1))
/* Dump the types of INTEGER_CSTs explicitly, for we
can't infer them and MEM_ATTR caching will share
MEM_REFs with differently-typed op0s. */
&& TREE_CODE (TREE_OPERAND (op0, 0)) != INTEGER_CST
/* Released SSA_NAMES have no TREE_TYPE. */
&& TREE_TYPE (TREE_OPERAND (op0, 0)) != NULL_TREE
/* Same pointer types, but ignoring POINTER_TYPE vs.
REFERENCE_TYPE. */
&& (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1))))
&& (TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 1))))
&& (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 0)))
== TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 1))))
/* Same value types ignoring qualifiers. */
&& (TYPE_MAIN_VARIANT (TREE_TYPE (op0))
== TYPE_MAIN_VARIANT
(TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1)))))
&& MR_DEPENDENCE_CLIQUE (op0) == 0)))
{
op0 = TREE_OPERAND (op0, 0);
str = "->";
}
if (op_prio (op0) < op_prio (node))
pp_left_paren (pp);
dump_generic_node (pp, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_right_paren (pp);
pp_string (pp, str);
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
op0 = component_ref_field_offset (node);
if (op0 && TREE_CODE (op0) != INTEGER_CST)
{
pp_string (pp, "{off: ");
dump_generic_node (pp, op0, spc, flags, false);
pp_right_brace (pp);
}
break;
case BIT_FIELD_REF:
if (flags & TDF_GIMPLE)
{
pp_string (pp, "__BIT_FIELD_REF <");
dump_generic_node (pp, TREE_TYPE (node),
spc, flags | TDF_SLIM, false);
if (TYPE_ALIGN (TREE_TYPE (node))
!= TYPE_ALIGN (TYPE_MAIN_VARIANT (TREE_TYPE (node))))
{
pp_string (pp, ", ");
pp_decimal_int (pp, TYPE_ALIGN (TREE_TYPE (node)));
}
pp_greater (pp);
pp_string (pp, " (");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc,
flags | TDF_SLIM, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc,
flags | TDF_SLIM, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc,
flags | TDF_SLIM, false);
pp_right_paren (pp);
}
else
{
pp_string (pp, "BIT_FIELD_REF <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_greater (pp);
}
break;
case BIT_INSERT_EXPR:
pp_string (pp, "BIT_INSERT_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (pp, " (");
if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (node, 1))))
pp_decimal_int (pp,
TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (node, 1))));
else
dump_generic_node (pp, TYPE_SIZE (TREE_TYPE (TREE_OPERAND (node, 1))),
spc, flags, false);
pp_string (pp, " bits)>");
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
op0 = TREE_OPERAND (node, 0);
if (op_prio (op0) < op_prio (node))
pp_left_paren (pp);
dump_generic_node (pp, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_right_paren (pp);
pp_left_bracket (pp);
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
if (TREE_CODE (node) == ARRAY_RANGE_REF)
pp_string (pp, " ...");
pp_right_bracket (pp);
op0 = array_ref_low_bound (node);
op1 = array_ref_element_size (node);
if (!integer_zerop (op0)
|| TREE_OPERAND (node, 2)
|| TREE_OPERAND (node, 3))
{
pp_string (pp, "{lb: ");
dump_generic_node (pp, op0, spc, flags, false);
pp_string (pp, " sz: ");
dump_generic_node (pp, op1, spc, flags, false);
pp_right_brace (pp);
}
break;
case CONSTRUCTOR:
{
unsigned HOST_WIDE_INT ix;
tree field, val;
bool is_struct_init = false;
bool is_array_init = false;
widest_int curidx;
if (flags & TDF_GIMPLE)
{
pp_string (pp, "_Literal (");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_string (pp, ") ");
}
pp_left_brace (pp);
if (TREE_CLOBBER_P (node))
pp_string (pp, "CLOBBER");
else if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (node)) == UNION_TYPE)
is_struct_init = true;
else if (TREE_CODE (TREE_TYPE (node)) == ARRAY_TYPE
&& TYPE_DOMAIN (TREE_TYPE (node))
&& TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)))
&& TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))))
== INTEGER_CST)
{
tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)));
is_array_init = true;
curidx = wi::to_widest (minv);
}
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
{
if (field)
{
if (is_struct_init)
{
pp_dot (pp);
dump_generic_node (pp, field, spc, flags, false);
pp_equal (pp);
}
else if (is_array_init
&& (TREE_CODE (field) != INTEGER_CST
|| curidx != wi::to_widest (field)))
{
pp_left_bracket (pp);
if (TREE_CODE (field) == RANGE_EXPR)
{
dump_generic_node (pp, TREE_OPERAND (field, 0), spc,
flags, false);
pp_string (pp, " ... ");
dump_generic_node (pp, TREE_OPERAND (field, 1), spc,
flags, false);
if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST)
curidx = wi::to_widest (TREE_OPERAND (field, 1));
}
else
dump_generic_node (pp, field, spc, flags, false);
if (TREE_CODE (field) == INTEGER_CST)
curidx = wi::to_widest (field);
pp_string (pp, "]=");
}
}
if (is_array_init)
curidx += 1;
if (val && TREE_CODE (val) == ADDR_EXPR)
if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
val = TREE_OPERAND (val, 0);
if (val && TREE_CODE (val) == FUNCTION_DECL)
dump_decl_name (pp, val, flags);
else
dump_generic_node (pp, val, spc, flags, false);
if (ix != CONSTRUCTOR_NELTS (node) - 1)
{
pp_comma (pp);
pp_space (pp);
}
}
pp_right_brace (pp);
}
break;
case COMPOUND_EXPR:
{
tree *tp;
if (flags & TDF_SLIM)
{
pp_string (pp, "<COMPOUND_EXPR>");
break;
}
dump_generic_node (pp, TREE_OPERAND (node, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (pp, spc);
else
{
pp_comma (pp);
pp_space (pp);
}
for (tp = &TREE_OPERAND (node, 1);
TREE_CODE (*tp) == COMPOUND_EXPR;
tp = &TREE_OPERAND (*tp, 1))
{
dump_generic_node (pp, TREE_OPERAND (*tp, 0),
spc, flags, !(flags & TDF_SLIM));
if (flags & TDF_SLIM)
newline_and_indent (pp, spc);
else
{
pp_comma (pp);
pp_space (pp);
}
}
dump_generic_node (pp, *tp, spc, flags, !(flags & TDF_SLIM));
}
break;
case STATEMENT_LIST:
{
tree_stmt_iterator si;
bool first = true;
if (flags & TDF_SLIM)
{
pp_string (pp, "<STATEMENT_LIST>");
break;
}
for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si))
{
if (!first)
newline_and_indent (pp, spc);
else
first = false;
dump_generic_node (pp, tsi_stmt (si), spc, flags, true);
}
}
break;
case MODIFY_EXPR:
case INIT_EXPR:
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags,
false);
pp_space (pp);
pp_equal (pp);
pp_space (pp);
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags,
false);
break;
case TARGET_EXPR:
pp_string (pp, "TARGET_EXPR <");
dump_generic_node (pp, TARGET_EXPR_SLOT (node), spc, flags, false);
pp_comma (pp);
pp_space (pp);
dump_generic_node (pp, TARGET_EXPR_INITIAL (node), spc, flags, false);
pp_greater (pp);
break;
case DECL_EXPR:
print_declaration (pp, DECL_EXPR_DECL (node), spc, flags);
is_stmt = false;
break;
case COND_EXPR:
if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node)
{
pp_string (pp, "if (");
dump_generic_node (pp, COND_EXPR_COND (node), spc, flags, false);
pp_right_paren (pp);
/* The lowered cond_exprs should always be printed in full. */
if (COND_EXPR_THEN (node)
&& (IS_EMPTY_STMT (COND_EXPR_THEN (node))
|| TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR)
&& COND_EXPR_ELSE (node)
&& (IS_EMPTY_STMT (COND_EXPR_ELSE (node))
|| TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR))
{
pp_space (pp);
dump_generic_node (pp, COND_EXPR_THEN (node),
0, flags, true);
if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node)))
{
pp_string (pp, " else ");
dump_generic_node (pp, COND_EXPR_ELSE (node),
0, flags, true);
}
}
else if (!(flags & TDF_SLIM))
{
/* Output COND_EXPR_THEN. */
if (COND_EXPR_THEN (node))
{
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, COND_EXPR_THEN (node), spc+4,
flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
}
/* Output COND_EXPR_ELSE. */
if (COND_EXPR_ELSE (node)
&& !IS_EMPTY_STMT (COND_EXPR_ELSE (node)))
{
newline_and_indent (pp, spc);
pp_string (pp, "else");
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, COND_EXPR_ELSE (node), spc+4,
flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
}
}
is_expr = false;
}
else
{
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (pp);
pp_question (pp);
pp_space (pp);
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_space (pp);
pp_colon (pp);
pp_space (pp);
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
}
break;
case BIND_EXPR:
pp_left_brace (pp);
if (!(flags & TDF_SLIM))
{
if (BIND_EXPR_VARS (node))
{
pp_newline (pp);
for (op0 = BIND_EXPR_VARS (node); op0; op0 = DECL_CHAIN (op0))
{
print_declaration (pp, op0, spc+2, flags);
pp_newline (pp);
}
}
newline_and_indent (pp, spc+2);
dump_generic_node (pp, BIND_EXPR_BODY (node), spc+2, flags, true);
newline_and_indent (pp, spc);
pp_right_brace (pp);
}
is_expr = false;
break;
case CALL_EXPR:
if (CALL_EXPR_FN (node) != NULL_TREE)
print_call_name (pp, CALL_EXPR_FN (node), flags);
else
{
pp_dot (pp);
pp_string (pp, internal_fn_name (CALL_EXPR_IFN (node)));
}
/* Print parameters. */
pp_space (pp);
pp_left_paren (pp);
{
tree arg;
call_expr_arg_iterator iter;
FOR_EACH_CALL_EXPR_ARG (arg, iter, node)
{
dump_generic_node (pp, arg, spc, flags, false);
if (more_call_expr_args_p (&iter))
{
pp_comma (pp);
pp_space (pp);
}
}
}
if (CALL_EXPR_VA_ARG_PACK (node))
{
if (call_expr_nargs (node) > 0)
{
pp_comma (pp);
pp_space (pp);
}
pp_string (pp, "__builtin_va_arg_pack ()");
}
pp_right_paren (pp);
op1 = CALL_EXPR_STATIC_CHAIN (node);
if (op1)
{
pp_string (pp, " [static-chain: ");
dump_generic_node (pp, op1, spc, flags, false);
pp_right_bracket (pp);
}
if (CALL_EXPR_RETURN_SLOT_OPT (node))
pp_string (pp, " [return slot optimization]");
if (CALL_EXPR_TAILCALL (node))
pp_string (pp, " [tail call]");
break;
case WITH_CLEANUP_EXPR:
NIY;
break;
case CLEANUP_POINT_EXPR:
pp_string (pp, "<<cleanup_point ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ">>");
break;
case PLACEHOLDER_EXPR:
pp_string (pp, "<PLACEHOLDER_EXPR ");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_greater (pp);
break;
/* Binary arithmetic and logic expressions. */
case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
case MULT_EXPR:
case MULT_HIGHPART_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
case POINTER_DIFF_EXPR:
case MINUS_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
case WIDEN_LSHIFT_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_AND_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
case EQ_EXPR:
case NE_EXPR:
case UNLT_EXPR:
case UNLE_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
case LTGT_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
{
const char *op = op_symbol (node);
op0 = TREE_OPERAND (node, 0);
op1 = TREE_OPERAND (node, 1);
/* Parenthesize the left operand when its priority is lower than or
equal to that of the node, to keep the semantics of the tree
representation. */
if (op_prio (op0) <= op_prio (node))
{
pp_left_paren (pp);
dump_generic_node (pp, op0, spc, flags, false);
pp_right_paren (pp);
}
else
dump_generic_node (pp, op0, spc, flags, false);
pp_space (pp);
pp_string (pp, op);
pp_space (pp);
/* Likewise for the right operand; parenthesizing at equal priority
preserves evaluation order for non-associative operators. */
if (op_prio (op1) <= op_prio (node))
{
pp_left_paren (pp);
dump_generic_node (pp, op1, spc, flags, false);
pp_right_paren (pp);
}
else
dump_generic_node (pp, op1, spc, flags, false);
}
break;
/* Unary arithmetic and logic expressions. */
case NEGATE_EXPR:
case BIT_NOT_EXPR:
case TRUTH_NOT_EXPR:
case ADDR_EXPR:
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case INDIRECT_REF:
if (TREE_CODE (node) == ADDR_EXPR
&& (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST
|| TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL))
; /* Do not output '&' for strings and function pointers. */
else
pp_string (pp, op_symbol (node));
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_left_paren (pp);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_right_paren (pp);
}
else
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
break;
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node))
{
pp_left_paren (pp);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_right_paren (pp);
}
else
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, op_symbol (node));
break;
case MIN_EXPR:
pp_string (pp, "MIN_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_greater (pp);
break;
case MAX_EXPR:
pp_string (pp, "MAX_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_greater (pp);
break;
case ABS_EXPR:
pp_string (pp, "ABS_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
break;
case ABSU_EXPR:
pp_string (pp, "ABSU_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
break;
case RANGE_EXPR:
NIY;
break;
case ADDR_SPACE_CONVERT_EXPR:
case FIXED_CONVERT_EXPR:
case FIX_TRUNC_EXPR:
case FLOAT_EXPR:
CASE_CONVERT:
type = TREE_TYPE (node);
op0 = TREE_OPERAND (node, 0);
if (type != TREE_TYPE (op0))
{
pp_left_paren (pp);
dump_generic_node (pp, type, spc, flags, false);
pp_string (pp, ") ");
}
if (op_prio (op0) < op_prio (node))
pp_left_paren (pp);
dump_generic_node (pp, op0, spc, flags, false);
if (op_prio (op0) < op_prio (node))
pp_right_paren (pp);
break;
case VIEW_CONVERT_EXPR:
if (flags & TDF_GIMPLE)
pp_string (pp, "__VIEW_CONVERT <");
else
pp_string (pp, "VIEW_CONVERT_EXPR<");
dump_generic_node (pp, TREE_TYPE (node), spc, flags, false);
pp_string (pp, ">(");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_right_paren (pp);
break;
case PAREN_EXPR:
pp_string (pp, "((");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, "))");
break;
case NON_LVALUE_EXPR:
pp_string (pp, "NON_LVALUE_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
break;
case SAVE_EXPR:
pp_string (pp, "SAVE_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
break;
case COMPLEX_EXPR:
pp_string (pp, "COMPLEX_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_greater (pp);
break;
case CONJ_EXPR:
pp_string (pp, "CONJ_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
break;
case REALPART_EXPR:
if (flags & TDF_GIMPLE)
{
pp_string (pp, "__real ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
}
else
{
pp_string (pp, "REALPART_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
}
break;
case IMAGPART_EXPR:
if (flags & TDF_GIMPLE)
{
pp_string (pp, "__imag ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
}
else
{
pp_string (pp, "IMAGPART_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
}
break;
case VA_ARG_EXPR:
pp_string (pp, "VA_ARG_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_greater (pp);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
pp_string (pp, "try");
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc+4, flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
newline_and_indent (pp, spc);
if (TREE_CODE (node) == TRY_CATCH_EXPR)
{
node = TREE_OPERAND (node, 1);
pp_string (pp, "catch");
}
else
{
gcc_assert (TREE_CODE (node) == TRY_FINALLY_EXPR);
node = TREE_OPERAND (node, 1);
pp_string (pp, "finally");
if (TREE_CODE (node) == EH_ELSE_EXPR)
{
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc+4,
flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
newline_and_indent (pp, spc);
node = TREE_OPERAND (node, 1);
pp_string (pp, "else");
}
}
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, node, spc+4, flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
is_expr = false;
break;
case CATCH_EXPR:
pp_string (pp, "catch (");
dump_generic_node (pp, CATCH_TYPES (node), spc+2, flags, false);
pp_right_paren (pp);
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, CATCH_BODY (node), spc+4, flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
is_expr = false;
break;
case EH_FILTER_EXPR:
pp_string (pp, "<<<eh_filter (");
dump_generic_node (pp, EH_FILTER_TYPES (node), spc+2, flags, false);
pp_string (pp, ")>>>");
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, EH_FILTER_FAILURE (node), spc+4, flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
is_expr = false;
break;
case LABEL_EXPR:
op0 = TREE_OPERAND (node, 0);
/* If this is for break or continue, don't bother printing it. */
if (DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
break;
}
dump_generic_node (pp, op0, spc, flags, false);
pp_colon (pp);
if (DECL_NONLOCAL (op0))
pp_string (pp, " [non-local]");
break;
case LOOP_EXPR:
pp_string (pp, "while (1)");
if (!(flags & TDF_SLIM))
{
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
newline_and_indent (pp, spc+4);
dump_generic_node (pp, LOOP_EXPR_BODY (node), spc+4, flags, true);
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
}
is_expr = false;
break;
case PREDICT_EXPR:
pp_string (pp, "// predicted ");
if (PREDICT_EXPR_OUTCOME (node))
pp_string (pp, "likely by ");
else
pp_string (pp, "unlikely by ");
pp_string (pp, predictor_name (PREDICT_EXPR_PREDICTOR (node)));
pp_string (pp, " predictor.");
break;
case ANNOTATE_EXPR:
pp_string (pp, "ANNOTATE_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
switch ((enum annot_expr_kind) TREE_INT_CST_LOW (TREE_OPERAND (node, 1)))
{
case annot_expr_ivdep_kind:
pp_string (pp, ", ivdep");
break;
case annot_expr_unroll_kind:
pp_printf (pp, ", unroll %d",
(int) TREE_INT_CST_LOW (TREE_OPERAND (node, 2)));
break;
case annot_expr_no_vector_kind:
pp_string (pp, ", no-vector");
break;
case annot_expr_vector_kind:
pp_string (pp, ", vector");
break;
case annot_expr_parallel_kind:
pp_string (pp, ", parallel");
break;
default:
gcc_unreachable ();
}
pp_greater (pp);
break;
case RETURN_EXPR:
pp_string (pp, "return");
op0 = TREE_OPERAND (node, 0);
if (op0)
{
pp_space (pp);
if (TREE_CODE (op0) == MODIFY_EXPR)
dump_generic_node (pp, TREE_OPERAND (op0, 1),
spc, flags, false);
else
dump_generic_node (pp, op0, spc, flags, false);
}
break;
case EXIT_EXPR:
pp_string (pp, "if (");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ") break");
break;
case SWITCH_EXPR:
pp_string (pp, "switch (");
dump_generic_node (pp, SWITCH_COND (node), spc, flags, false);
pp_right_paren (pp);
if (!(flags & TDF_SLIM))
{
newline_and_indent (pp, spc+2);
pp_left_brace (pp);
if (SWITCH_BODY (node))
{
newline_and_indent (pp, spc+4);
dump_generic_node (pp, SWITCH_BODY (node), spc+4, flags,
true);
}
newline_and_indent (pp, spc+2);
pp_right_brace (pp);
}
is_expr = false;
break;
case GOTO_EXPR:
op0 = GOTO_DESTINATION (node);
if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0))
{
const char *name = IDENTIFIER_POINTER (DECL_NAME (op0));
if (strcmp (name, "break") == 0
|| strcmp (name, "continue") == 0)
{
pp_string (pp, name);
break;
}
}
pp_string (pp, "goto ");
dump_generic_node (pp, op0, spc, flags, false);
break;
case ASM_EXPR:
pp_string (pp, "__asm__");
if (ASM_VOLATILE_P (node))
pp_string (pp, " __volatile__");
pp_left_paren (pp);
dump_generic_node (pp, ASM_STRING (node), spc, flags, false);
pp_colon (pp);
dump_generic_node (pp, ASM_OUTPUTS (node), spc, flags, false);
pp_colon (pp);
dump_generic_node (pp, ASM_INPUTS (node), spc, flags, false);
if (ASM_CLOBBERS (node))
{
pp_colon (pp);
dump_generic_node (pp, ASM_CLOBBERS (node), spc, flags, false);
}
pp_right_paren (pp);
break;
case CASE_LABEL_EXPR:
if (CASE_LOW (node) && CASE_HIGH (node))
{
pp_string (pp, "case ");
dump_generic_node (pp, CASE_LOW (node), spc, flags, false);
pp_string (pp, " ... ");
dump_generic_node (pp, CASE_HIGH (node), spc, flags, false);
}
else if (CASE_LOW (node))
{
pp_string (pp, "case ");
dump_generic_node (pp, CASE_LOW (node), spc, flags, false);
}
else
pp_string (pp, "default");
pp_colon (pp);
break;
case OBJ_TYPE_REF:
pp_string (pp, "OBJ_TYPE_REF(");
dump_generic_node (pp, OBJ_TYPE_REF_EXPR (node), spc, flags, false);
pp_semicolon (pp);
/* We omit the class type for -fcompare-debug because we may
drop TYPE_BINFO early depending on debug info, and then
virtual_method_call_p would return false, whereas when
TYPE_BINFO is preserved it may still return true and then
we'd print the class type. Compare tree and rtl dumps for
libstdc++-prettyprinters/shared_ptr.cc with and without -g,
for example, at occurrences of OBJ_TYPE_REF. */
if (!(flags & (TDF_SLIM | TDF_COMPARE_DEBUG))
&& virtual_method_call_p (node, true))
{
pp_string (pp, "(");
dump_generic_node (pp, obj_type_ref_class (node, true),
spc, flags, false);
pp_string (pp, ")");
}
dump_generic_node (pp, OBJ_TYPE_REF_OBJECT (node), spc, flags, false);
pp_arrow (pp);
dump_generic_node (pp, OBJ_TYPE_REF_TOKEN (node), spc, flags, false);
pp_right_paren (pp);
break;
case SSA_NAME:
if (SSA_NAME_IDENTIFIER (node))
{
if ((flags & TDF_NOUID)
&& SSA_NAME_VAR (node)
&& DECL_NAMELESS (SSA_NAME_VAR (node)))
dump_fancy_name (pp, SSA_NAME_IDENTIFIER (node));
else if (! (flags & TDF_GIMPLE)
|| SSA_NAME_VAR (node))
dump_generic_node (pp, SSA_NAME_IDENTIFIER (node),
spc, flags, false);
}
pp_underscore (pp);
pp_decimal_int (pp, SSA_NAME_VERSION (node));
if (SSA_NAME_IS_DEFAULT_DEF (node))
pp_string (pp, "(D)");
if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node))
pp_string (pp, "(ab)");
break;
case WITH_SIZE_EXPR:
pp_string (pp, "WITH_SIZE_EXPR <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_greater (pp);
break;
case ASSERT_EXPR:
pp_string (pp, "ASSERT_EXPR <");
dump_generic_node (pp, ASSERT_EXPR_VAR (node), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, ASSERT_EXPR_COND (node), spc, flags, false);
pp_greater (pp);
break;
case SCEV_KNOWN:
pp_string (pp, "scev_known");
break;
case SCEV_NOT_KNOWN:
pp_string (pp, "scev_not_known");
break;
case POLYNOMIAL_CHREC:
pp_left_brace (pp);
dump_generic_node (pp, CHREC_LEFT (node), spc, flags, false);
pp_string (pp, ", +, ");
dump_generic_node (pp, CHREC_RIGHT (node), spc, flags, false);
pp_printf (pp, "}_%u", CHREC_VARIABLE (node));
is_stmt = false;
break;
case REALIGN_LOAD_EXPR:
pp_string (pp, "REALIGN_LOAD <");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_greater (pp);
break;
case VEC_COND_EXPR:
pp_string (pp, " VEC_COND_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " , ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " , ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_PERM_EXPR:
pp_string (pp, " VEC_PERM_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " , ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " , ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (pp, " > ");
break;
case DOT_PROD_EXPR:
pp_string (pp, " DOT_PROD_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (pp, " > ");
break;
case WIDEN_MULT_PLUS_EXPR:
pp_string (pp, " WIDEN_MULT_PLUS_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (pp, " > ");
break;
case WIDEN_MULT_MINUS_EXPR:
pp_string (pp, " WIDEN_MULT_MINUS_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false);
pp_string (pp, " > ");
break;
case OACC_PARALLEL:
pp_string (pp, "#pragma acc parallel");
goto dump_omp_clauses_body;
case OACC_KERNELS:
pp_string (pp, "#pragma acc kernels");
goto dump_omp_clauses_body;
case OACC_SERIAL:
pp_string (pp, "#pragma acc serial");
goto dump_omp_clauses_body;
case OACC_DATA:
pp_string (pp, "#pragma acc data");
dump_omp_clauses (pp, OACC_DATA_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OACC_HOST_DATA:
pp_string (pp, "#pragma acc host_data");
dump_omp_clauses (pp, OACC_HOST_DATA_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OACC_DECLARE:
pp_string (pp, "#pragma acc declare");
dump_omp_clauses (pp, OACC_DECLARE_CLAUSES (node), spc, flags);
break;
case OACC_UPDATE:
pp_string (pp, "#pragma acc update");
dump_omp_clauses (pp, OACC_UPDATE_CLAUSES (node), spc, flags);
break;
case OACC_ENTER_DATA:
pp_string (pp, "#pragma acc enter data");
dump_omp_clauses (pp, OACC_ENTER_DATA_CLAUSES (node), spc, flags);
break;
case OACC_EXIT_DATA:
pp_string (pp, "#pragma acc exit data");
dump_omp_clauses (pp, OACC_EXIT_DATA_CLAUSES (node), spc, flags);
break;
case OACC_CACHE:
pp_string (pp, "#pragma acc cache");
dump_omp_clauses (pp, OACC_CACHE_CLAUSES (node), spc, flags);
break;
case OMP_PARALLEL:
pp_string (pp, "#pragma omp parallel");
dump_omp_clauses (pp, OMP_PARALLEL_CLAUSES (node), spc, flags);
goto dump_omp_body;
dump_omp_clauses_body:
dump_omp_clauses (pp, OMP_CLAUSES (node), spc, flags);
goto dump_omp_body;
dump_omp_body:
if (!(flags & TDF_SLIM) && OMP_BODY (node))
{
newline_and_indent (pp, spc + 2);
pp_left_brace (pp);
newline_and_indent (pp, spc + 4);
dump_generic_node (pp, OMP_BODY (node), spc + 4, flags, false);
newline_and_indent (pp, spc + 2);
pp_right_brace (pp);
}
is_expr = false;
break;
case OMP_TASK:
pp_string (pp, OMP_TASK_BODY (node) ? "#pragma omp task"
: "#pragma omp taskwait");
dump_omp_clauses (pp, OMP_TASK_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_FOR:
pp_string (pp, "#pragma omp for");
goto dump_omp_loop;
case OMP_SIMD:
pp_string (pp, "#pragma omp simd");
goto dump_omp_loop;
case OMP_DISTRIBUTE:
pp_string (pp, "#pragma omp distribute");
goto dump_omp_loop;
case OMP_TASKLOOP:
pp_string (pp, "#pragma omp taskloop");
goto dump_omp_loop;
case OMP_LOOP:
pp_string (pp, "#pragma omp loop");
goto dump_omp_loop;
case OACC_LOOP:
pp_string (pp, "#pragma acc loop");
goto dump_omp_loop;
case OMP_TEAMS:
pp_string (pp, "#pragma omp teams");
dump_omp_clauses (pp, OMP_TEAMS_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_TARGET_DATA:
pp_string (pp, "#pragma omp target data");
dump_omp_clauses (pp, OMP_TARGET_DATA_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_TARGET_ENTER_DATA:
pp_string (pp, "#pragma omp target enter data");
dump_omp_clauses (pp, OMP_TARGET_ENTER_DATA_CLAUSES (node), spc, flags);
is_expr = false;
break;
case OMP_TARGET_EXIT_DATA:
pp_string (pp, "#pragma omp target exit data");
dump_omp_clauses (pp, OMP_TARGET_EXIT_DATA_CLAUSES (node), spc, flags);
is_expr = false;
break;
case OMP_TARGET:
pp_string (pp, "#pragma omp target");
dump_omp_clauses (pp, OMP_TARGET_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_TARGET_UPDATE:
pp_string (pp, "#pragma omp target update");
dump_omp_clauses (pp, OMP_TARGET_UPDATE_CLAUSES (node), spc, flags);
is_expr = false;
break;
dump_omp_loop:
dump_omp_clauses (pp, OMP_FOR_CLAUSES (node), spc, flags);
if (!(flags & TDF_SLIM))
{
int i;
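/* Indentation bookkeeping: each level of a collapsed loop nest indents
by two further columns; the adjustments made here are undone after
the loop body has been printed. */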
if (OMP_FOR_PRE_BODY (node))
{
newline_and_indent (pp, spc + 2);
pp_left_brace (pp);
spc += 4;
newline_and_indent (pp, spc);
dump_generic_node (pp, OMP_FOR_PRE_BODY (node),
spc, flags, false);
}
if (OMP_FOR_INIT (node))
{
spc -= 2;
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++)
{
spc += 2;
newline_and_indent (pp, spc);
pp_string (pp, "for (");
tree init = TREE_VEC_ELT (OMP_FOR_INIT (node), i);
if (TREE_CODE (init) != MODIFY_EXPR
|| TREE_CODE (TREE_OPERAND (init, 1)) != TREE_VEC)
dump_generic_node (pp, init, spc, flags, false);
else
{
dump_generic_node (pp, TREE_OPERAND (init, 0),
spc, flags, false);
pp_string (pp, " = ");
dump_omp_loop_non_rect_expr (pp, TREE_OPERAND (init, 1),
spc, flags);
}
pp_string (pp, "; ");
tree cond = TREE_VEC_ELT (OMP_FOR_COND (node), i);
if (!COMPARISON_CLASS_P (cond)
|| TREE_CODE (TREE_OPERAND (cond, 1)) != TREE_VEC)
dump_generic_node (pp, cond, spc, flags, false);
else
{
dump_generic_node (pp, TREE_OPERAND (cond, 0),
spc, flags, false);
const char *op = op_symbol (cond);
pp_space (pp);
pp_string (pp, op);
pp_space (pp);
dump_omp_loop_non_rect_expr (pp, TREE_OPERAND (cond, 1),
spc, flags);
}
pp_string (pp, "; ");
dump_generic_node (pp,
TREE_VEC_ELT (OMP_FOR_INCR (node), i),
spc, flags, false);
pp_right_paren (pp);
}
}
if (OMP_FOR_BODY (node))
{
newline_and_indent (pp, spc + 2);
pp_left_brace (pp);
newline_and_indent (pp, spc + 4);
dump_generic_node (pp, OMP_FOR_BODY (node), spc + 4, flags,
false);
newline_and_indent (pp, spc + 2);
pp_right_brace (pp);
}
if (OMP_FOR_INIT (node))
spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2;
if (OMP_FOR_PRE_BODY (node))
{
spc -= 4;
newline_and_indent (pp, spc + 2);
pp_right_brace (pp);
}
}
is_expr = false;
break;
case OMP_SECTIONS:
pp_string (pp, "#pragma omp sections");
dump_omp_clauses (pp, OMP_SECTIONS_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_SECTION:
pp_string (pp, "#pragma omp section");
goto dump_omp_body;
case OMP_SCAN:
if (OMP_SCAN_CLAUSES (node))
{
pp_string (pp, "#pragma omp scan");
dump_omp_clauses (pp, OMP_SCAN_CLAUSES (node), spc, flags);
}
goto dump_omp_body;
case OMP_MASTER:
pp_string (pp, "#pragma omp master");
goto dump_omp_body;
case OMP_TASKGROUP:
pp_string (pp, "#pragma omp taskgroup");
dump_omp_clauses (pp, OMP_TASKGROUP_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_ORDERED:
pp_string (pp, "#pragma omp ordered");
dump_omp_clauses (pp, OMP_ORDERED_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_CRITICAL:
pp_string (pp, "#pragma omp critical");
if (OMP_CRITICAL_NAME (node))
{
pp_space (pp);
pp_left_paren (pp);
dump_generic_node (pp, OMP_CRITICAL_NAME (node), spc,
flags, false);
pp_right_paren (pp);
}
dump_omp_clauses (pp, OMP_CRITICAL_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_ATOMIC:
pp_string (pp, "#pragma omp atomic");
dump_omp_atomic_memory_order (pp, OMP_ATOMIC_MEMORY_ORDER (node));
newline_and_indent (pp, spc + 2);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (pp);
pp_equal (pp);
pp_space (pp);
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
break;
case OMP_ATOMIC_READ:
pp_string (pp, "#pragma omp atomic read");
dump_omp_atomic_memory_order (pp, OMP_ATOMIC_MEMORY_ORDER (node));
newline_and_indent (pp, spc + 2);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (pp);
break;
case OMP_ATOMIC_CAPTURE_OLD:
case OMP_ATOMIC_CAPTURE_NEW:
pp_string (pp, "#pragma omp atomic capture");
dump_omp_atomic_memory_order (pp, OMP_ATOMIC_MEMORY_ORDER (node));
newline_and_indent (pp, spc + 2);
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_space (pp);
pp_equal (pp);
pp_space (pp);
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
break;
case OMP_SINGLE:
pp_string (pp, "#pragma omp single");
dump_omp_clauses (pp, OMP_SINGLE_CLAUSES (node), spc, flags);
goto dump_omp_body;
case OMP_CLAUSE:
dump_omp_clause (pp, node, spc, flags);
is_expr = false;
break;
case TRANSACTION_EXPR:
if (TRANSACTION_EXPR_OUTER (node))
pp_string (pp, "__transaction_atomic [[outer]]");
else if (TRANSACTION_EXPR_RELAXED (node))
pp_string (pp, "__transaction_relaxed");
else
pp_string (pp, "__transaction_atomic");
if (!(flags & TDF_SLIM) && TRANSACTION_EXPR_BODY (node))
{
newline_and_indent (pp, spc);
pp_left_brace (pp);
newline_and_indent (pp, spc + 2);
dump_generic_node (pp, TRANSACTION_EXPR_BODY (node),
spc + 2, flags, false);
newline_and_indent (pp, spc);
pp_right_brace (pp);
}
is_expr = false;
break;
case VEC_SERIES_EXPR:
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case VEC_WIDEN_MULT_EVEN_EXPR:
case VEC_WIDEN_MULT_ODD_EXPR:
case VEC_WIDEN_LSHIFT_HI_EXPR:
case VEC_WIDEN_LSHIFT_LO_EXPR:
pp_space (pp);
for (str = get_tree_code_name (code); *str; str++)
pp_character (pp, TOUPPER (*str));
pp_string (pp, " < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_DUPLICATE_EXPR:
pp_space (pp);
for (str = get_tree_code_name (code); *str; str++)
pp_character (pp, TOUPPER (*str));
pp_string (pp, " < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_UNPACK_HI_EXPR:
pp_string (pp, " VEC_UNPACK_HI_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_UNPACK_LO_EXPR:
pp_string (pp, " VEC_UNPACK_LO_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_UNPACK_FLOAT_HI_EXPR:
pp_string (pp, " VEC_UNPACK_FLOAT_HI_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_UNPACK_FLOAT_LO_EXPR:
pp_string (pp, " VEC_UNPACK_FLOAT_LO_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
pp_string (pp, " VEC_UNPACK_FIX_TRUNC_HI_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
pp_string (pp, " VEC_UNPACK_FIX_TRUNC_LO_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_PACK_TRUNC_EXPR:
pp_string (pp, " VEC_PACK_TRUNC_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_PACK_SAT_EXPR:
pp_string (pp, " VEC_PACK_SAT_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_PACK_FIX_TRUNC_EXPR:
pp_string (pp, " VEC_PACK_FIX_TRUNC_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " > ");
break;
case VEC_PACK_FLOAT_EXPR:
pp_string (pp, " VEC_PACK_FLOAT_EXPR < ");
dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false);
pp_string (pp, ", ");
dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false);
pp_string (pp, " > ");
break;
case BLOCK:
dump_block_node (pp, node, spc, flags);
break;
case DEBUG_BEGIN_STMT:
pp_string (pp, "# DEBUG BEGIN STMT");
break;
default:
NIY;
}
if (is_stmt && is_expr)
pp_semicolon (pp);
return spc;
}
/* Print the declaration of a variable. */
void
print_declaration (pretty_printer *pp, tree t, int spc, dump_flags_t flags)
{
INDENT (spc);
if (TREE_CODE (t) == NAMELIST_DECL)
{
pp_string (pp, "namelist ");
dump_decl_name (pp, t, flags);
pp_semicolon (pp);
return;
}
if (TREE_CODE (t) == TYPE_DECL)
pp_string (pp, "typedef ");
if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t))
pp_string (pp, "register ");
if (TREE_PUBLIC (t) && DECL_EXTERNAL (t))
pp_string (pp, "extern ");
else if (TREE_STATIC (t))
pp_string (pp, "static ");
/* Print the type and name. */
if (TREE_TYPE (t) && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
{
tree tmp;
/* Print array's type. */
tmp = TREE_TYPE (t);
while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE)
tmp = TREE_TYPE (tmp);
dump_generic_node (pp, TREE_TYPE (tmp), spc, flags, false);
/* Print variable's name. */
pp_space (pp);
dump_generic_node (pp, t, spc, flags, false);
/* Print the dimensions. */
tmp = TREE_TYPE (t);
while (TREE_CODE (tmp) == ARRAY_TYPE)
{
dump_array_domain (pp, TYPE_DOMAIN (tmp), spc, flags);
tmp = TREE_TYPE (tmp);
}
}
else if (TREE_CODE (t) == FUNCTION_DECL)
{
dump_generic_node (pp, TREE_TYPE (TREE_TYPE (t)), spc, flags, false);
pp_space (pp);
dump_decl_name (pp, t, flags);
dump_function_declaration (pp, TREE_TYPE (t), spc, flags);
}
else
{
/* Print type declaration. */
dump_generic_node (pp, TREE_TYPE (t), spc, flags, false);
/* Print variable's name. */
pp_space (pp);
dump_generic_node (pp, t, spc, flags, false);
}
if (VAR_P (t) && DECL_HARD_REGISTER (t))
{
pp_string (pp, " __asm__ ");
pp_left_paren (pp);
dump_generic_node (pp, DECL_ASSEMBLER_NAME (t), spc, flags, false);
pp_right_paren (pp);
}
/* The initial value of a function serves to determine whether the function
is declared or defined. So the following does not apply to function
nodes. */
if (TREE_CODE (t) != FUNCTION_DECL)
{
/* Print the initial value. */
if (DECL_INITIAL (t))
{
pp_space (pp);
pp_equal (pp);
pp_space (pp);
if (!(flags & TDF_SLIM))
dump_generic_node (pp, DECL_INITIAL (t), spc, flags, false);
else
pp_string (pp, "<<< omitted >>>");
}
}
if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
{
pp_string (pp, " [value-expr: ");
dump_generic_node (pp, DECL_VALUE_EXPR (t), spc, flags, false);
pp_right_bracket (pp);
}
pp_semicolon (pp);
}
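/* For illustration (not an exhaustive specification): a VAR_DECL for
"static int a[10]" is printed roughly as "static int a[10];", i.e.
storage class first, then the element type, the name, and each array
domain in declaration order, followed by any initializer. */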
/* Prints a structure: name, fields, and methods.
FIXME: Still incomplete. */
static void
print_struct_decl (pretty_printer *pp, const_tree node, int spc,
dump_flags_t flags)
{
/* Print the name of the structure. */
if (TYPE_NAME (node))
{
INDENT (spc);
if (TREE_CODE (node) == RECORD_TYPE)
pp_string (pp, "struct ");
else if ((TREE_CODE (node) == UNION_TYPE
|| TREE_CODE (node) == QUAL_UNION_TYPE))
pp_string (pp, "union ");
dump_generic_node (pp, TYPE_NAME (node), spc, TDF_NONE, false);
}
/* Print the contents of the structure. */
pp_newline (pp);
INDENT (spc);
pp_left_brace (pp);
pp_newline (pp);
/* Print the fields of the structure. */
{
tree tmp;
tmp = TYPE_FIELDS (node);
while (tmp)
{
/* Avoid printing the structure recursively. */
/* FIXME: not handled correctly; what about a cycle in the containment
graph? Maybe this could be solved by looking at the scope in which
the structure was declared. */
if (TREE_TYPE (tmp) != node
&& (TREE_CODE (TREE_TYPE (tmp)) != POINTER_TYPE
|| TREE_TYPE (TREE_TYPE (tmp)) != node))
{
print_declaration (pp, tmp, spc+2, flags);
pp_newline (pp);
}
tmp = DECL_CHAIN (tmp);
}
}
INDENT (spc);
pp_right_brace (pp);
}
/* Return the priority of the operator CODE.
From lowest to highest precedence with either left-to-right (L-R)
or right-to-left (R-L) associativity:
1 [L-R] ,
2 [R-L] = += -= *= /= %= &= ^= |= <<= >>=
3 [R-L] ?:
4 [L-R] ||
5 [L-R] &&
6 [L-R] |
7 [L-R] ^
8 [L-R] &
9 [L-R] == !=
10 [L-R] < <= > >=
11 [L-R] << >>
12 [L-R] + -
13 [L-R] * / %
14 [R-L] ! ~ ++ -- + - * & (type) sizeof
15 [L-R] fn() [] -> .
unary +, - and * have higher precedence than the corresponding binary
operators. */
int
op_code_prio (enum tree_code code)
{
switch (code)
{
case TREE_LIST:
case COMPOUND_EXPR:
case BIND_EXPR:
return 1;
case MODIFY_EXPR:
case INIT_EXPR:
return 2;
case COND_EXPR:
return 3;
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
return 4;
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
return 5;
case BIT_IOR_EXPR:
return 6;
case BIT_XOR_EXPR:
case TRUTH_XOR_EXPR:
return 7;
case BIT_AND_EXPR:
return 8;
case EQ_EXPR:
case NE_EXPR:
return 9;
case UNLT_EXPR:
case UNLE_EXPR:
case UNGT_EXPR:
case UNGE_EXPR:
case UNEQ_EXPR:
case LTGT_EXPR:
case ORDERED_EXPR:
case UNORDERED_EXPR:
case LT_EXPR:
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
return 10;
case LSHIFT_EXPR:
case RSHIFT_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
case VEC_WIDEN_LSHIFT_HI_EXPR:
case VEC_WIDEN_LSHIFT_LO_EXPR:
case WIDEN_LSHIFT_EXPR:
return 11;
case WIDEN_SUM_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
case POINTER_DIFF_EXPR:
case MINUS_EXPR:
return 12;
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case WIDEN_MULT_EXPR:
case DOT_PROD_EXPR:
case WIDEN_MULT_PLUS_EXPR:
case WIDEN_MULT_MINUS_EXPR:
case MULT_EXPR:
case MULT_HIGHPART_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case RDIV_EXPR:
case EXACT_DIV_EXPR:
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
return 13;
case TRUTH_NOT_EXPR:
case BIT_NOT_EXPR:
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case NEGATE_EXPR:
case INDIRECT_REF:
case ADDR_EXPR:
case FLOAT_EXPR:
CASE_CONVERT:
case FIX_TRUNC_EXPR:
case TARGET_EXPR:
return 14;
case CALL_EXPR:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case COMPONENT_REF:
return 15;
/* Special expressions. */
case MIN_EXPR:
case MAX_EXPR:
case ABS_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VEC_UNPACK_HI_EXPR:
case VEC_UNPACK_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
case VEC_UNPACK_FLOAT_LO_EXPR:
case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_SAT_EXPR:
return 16;
default:
/* Return an arbitrarily high precedence to avoid surrounding single
VAR_DECLs in ()s. */
return 9999;
}
}
/* Return the priority of the operator OP. */
int
op_prio (const_tree op)
{
enum tree_code code;
if (op == NULL)
return 9999;
code = TREE_CODE (op);
if (code == SAVE_EXPR || code == NON_LVALUE_EXPR)
return op_prio (TREE_OPERAND (op, 0));
return op_code_prio (code);
}
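/* Worked example: dumping MULT_EXPR <PLUS_EXPR <a, b>, c> prints
"(a + b) * c". PLUS_EXPR has priority 12 and MULT_EXPR priority 13,
so the lower-priority left operand is wrapped in parentheses by the
binary-expression case in dump_generic_node above. */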
/* Return the symbol associated with operator CODE. */
const char *
op_symbol_code (enum tree_code code)
{
switch (code)
{
case MODIFY_EXPR:
return "=";
case TRUTH_OR_EXPR:
case TRUTH_ORIF_EXPR:
return "||";
case TRUTH_AND_EXPR:
case TRUTH_ANDIF_EXPR:
return "&&";
case BIT_IOR_EXPR:
return "|";
case TRUTH_XOR_EXPR:
case BIT_XOR_EXPR:
return "^";
case ADDR_EXPR:
case BIT_AND_EXPR:
return "&";
case ORDERED_EXPR:
return "ord";
case UNORDERED_EXPR:
return "unord";
case EQ_EXPR:
return "==";
case UNEQ_EXPR:
return "u==";
case NE_EXPR:
return "!=";
case LT_EXPR:
return "<";
case UNLT_EXPR:
return "u<";
case LE_EXPR:
return "<=";
case UNLE_EXPR:
return "u<=";
case GT_EXPR:
return ">";
case UNGT_EXPR:
return "u>";
case GE_EXPR:
return ">=";
case UNGE_EXPR:
return "u>=";
case LTGT_EXPR:
return "<>";
case LSHIFT_EXPR:
return "<<";
case RSHIFT_EXPR:
return ">>";
case LROTATE_EXPR:
return "r<<";
case RROTATE_EXPR:
return "r>>";
case WIDEN_LSHIFT_EXPR:
return "w<<";
case POINTER_PLUS_EXPR:
return "+";
case PLUS_EXPR:
return "+";
case WIDEN_SUM_EXPR:
return "w+";
case WIDEN_MULT_EXPR:
return "w*";
case MULT_HIGHPART_EXPR:
return "h*";
case NEGATE_EXPR:
case MINUS_EXPR:
case POINTER_DIFF_EXPR:
return "-";
case BIT_NOT_EXPR:
return "~";
case TRUTH_NOT_EXPR:
return "!";
case MULT_EXPR:
case INDIRECT_REF:
return "*";
case TRUNC_DIV_EXPR:
case RDIV_EXPR:
return "/";
case CEIL_DIV_EXPR:
return "/[cl]";
case FLOOR_DIV_EXPR:
return "/[fl]";
case ROUND_DIV_EXPR:
return "/[rd]";
case EXACT_DIV_EXPR:
return "/[ex]";
case TRUNC_MOD_EXPR:
return "%";
case CEIL_MOD_EXPR:
return "%[cl]";
case FLOOR_MOD_EXPR:
return "%[fl]";
case ROUND_MOD_EXPR:
return "%[rd]";
case PREDECREMENT_EXPR:
return " --";
case PREINCREMENT_EXPR:
return " ++";
case POSTDECREMENT_EXPR:
return "-- ";
case POSTINCREMENT_EXPR:
return "++ ";
case MAX_EXPR:
return "max";
case MIN_EXPR:
return "min";
default:
return "<<< ??? >>>";
}
}
/* Return the symbol associated with operator OP. */
static const char *
op_symbol (const_tree op)
{
return op_symbol_code (TREE_CODE (op));
}
/* Prints the name of a call. NODE is the CALL_EXPR_FN of a CALL_EXPR or
the gimple_call_fn of a GIMPLE_CALL. */
void
print_call_name (pretty_printer *pp, tree node, dump_flags_t flags)
{
tree op0 = node;
if (TREE_CODE (op0) == NON_LVALUE_EXPR)
op0 = TREE_OPERAND (op0, 0);
again:
switch (TREE_CODE (op0))
{
case VAR_DECL:
case PARM_DECL:
case FUNCTION_DECL:
dump_function_name (pp, op0, flags);
break;
case ADDR_EXPR:
case INDIRECT_REF:
CASE_CONVERT:
op0 = TREE_OPERAND (op0, 0);
goto again;
case COND_EXPR:
pp_left_paren (pp);
dump_generic_node (pp, TREE_OPERAND (op0, 0), 0, flags, false);
pp_string (pp, ") ? ");
dump_generic_node (pp, TREE_OPERAND (op0, 1), 0, flags, false);
pp_string (pp, " : ");
dump_generic_node (pp, TREE_OPERAND (op0, 2), 0, flags, false);
break;
case ARRAY_REF:
if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
dump_function_name (pp, TREE_OPERAND (op0, 0), flags);
else
dump_generic_node (pp, op0, 0, flags, false);
break;
case MEM_REF:
if (integer_zerop (TREE_OPERAND (op0, 1)))
{
op0 = TREE_OPERAND (op0, 0);
goto again;
}
/* Fallthru. */
case COMPONENT_REF:
case SSA_NAME:
case OBJ_TYPE_REF:
dump_generic_node (pp, op0, 0, flags, false);
break;
default:
NIY;
}
}
/* Print the first N characters in the array STR, replacing non-printable
characters (including embedded nuls) with unambiguous escape sequences. */
void
pretty_print_string (pretty_printer *pp, const char *str, size_t n)
{
if (str == NULL)
return;
for ( ; n; --n, ++str)
{
switch (str[0])
{
case '\b':
pp_string (pp, "\\b");
break;
case '\f':
pp_string (pp, "\\f");
break;
case '\n':
pp_string (pp, "\\n");
break;
case '\r':
pp_string (pp, "\\r");
break;
case '\t':
pp_string (pp, "\\t");
break;
case '\v':
pp_string (pp, "\\v");
break;
case '\\':
pp_string (pp, "\\\\");
break;
case '\"':
pp_string (pp, "\\\"");
break;
case '\'':
pp_string (pp, "\\'");
break;
default:
if (str[0] || n > 1)
{
if (!ISPRINT (str[0]))
{
char buf[5];
sprintf (buf, "\\x%02x", (unsigned char)str[0]);
pp_string (pp, buf);
}
else
pp_character (pp, str[0]);
break;
}
}
}
}
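/* Worked example: the four bytes { 'a', '\n', '\0', 0x01 } are printed
as a\n\x00\x01: the newline becomes the two-character sequence \n,
and the embedded nul and the unprintable 0x01 become \x escapes. */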
static void
maybe_init_pretty_print (FILE *file)
{
if (!tree_pp)
{
tree_pp = new pretty_printer ();
pp_needs_newline (tree_pp) = true;
pp_translate_identifiers (tree_pp) = false;
}
tree_pp->buffer->stream = file;
}
static void
newline_and_indent (pretty_printer *pp, int spc)
{
pp_newline (pp);
INDENT (spc);
}
/* Handle the %K format for TEXT. Separate from default_tree_printer
so it can also be used in front ends.
The location LOC and BLOCK are expected to be extracted by the caller
from the %K argument arg via EXPR_LOCATION(arg) and TREE_BLOCK(arg). */
void
percent_K_format (text_info *text, location_t loc, tree block)
{
text->set_location (0, loc, SHOW_RANGE_WITH_CARET);
gcc_assert (pp_ti_abstract_origin (text) != NULL);
*pp_ti_abstract_origin (text) = NULL;
while (block
&& TREE_CODE (block) == BLOCK
&& BLOCK_ABSTRACT_ORIGIN (block))
{
tree ao = BLOCK_ABSTRACT_ORIGIN (block);
if (TREE_CODE (ao) == FUNCTION_DECL)
{
*pp_ti_abstract_origin (text) = block;
break;
}
block = BLOCK_SUPERCONTEXT (block);
}
}
/* Print the identifier ID to PRETTY-PRINTER. */
void
pp_tree_identifier (pretty_printer *pp, tree id)
{
if (pp_translate_identifiers (pp))
{
const char *text = identifier_to_locale (IDENTIFIER_POINTER (id));
pp_append_text (pp, text, text + strlen (text));
}
else
pp_append_text (pp, IDENTIFIER_POINTER (id),
IDENTIFIER_POINTER (id) + IDENTIFIER_LENGTH (id));
}
/* Dump general information about function FDECL ahead of the actual
function dump. */
void
dump_function_header (FILE *dump_file, tree fdecl, dump_flags_t flags)
{
const char *dname, *aname;
struct cgraph_node *node = cgraph_node::get (fdecl);
struct function *fun = DECL_STRUCT_FUNCTION (fdecl);
dname = lang_hooks.decl_printable_name (fdecl, 1);
if (DECL_ASSEMBLER_NAME_SET_P (fdecl))
aname = (IDENTIFIER_POINTER
(DECL_ASSEMBLER_NAME (fdecl)));
else
aname = "<unset-asm-name>";
fprintf (dump_file, "\n;; Function %s (%s, funcdef_no=%d",
dname, aname, fun->funcdef_no);
if (!(flags & TDF_NOUID))
fprintf (dump_file, ", decl_uid=%d", DECL_UID (fdecl));
if (node)
{
fprintf (dump_file, ", cgraph_uid=%d", node->get_uid ());
fprintf (dump_file, ", symbol_order=%d)%s\n\n", node->order,
node->frequency == NODE_FREQUENCY_HOT
? " (hot)"
: node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
? " (unlikely executed)"
: node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
? " (executed once)"
: "");
}
else
fprintf (dump_file, ")\n\n");
}
/* Dump double_int D to pretty_printer PP. UNS is true
if D is unsigned and false otherwise. */
void
pp_double_int (pretty_printer *pp, double_int d, bool uns)
{
if (d.fits_shwi ())
pp_wide_integer (pp, d.low);
else if (d.fits_uhwi ())
pp_unsigned_wide_integer (pp, d.low);
else
{
unsigned HOST_WIDE_INT low = d.low;
HOST_WIDE_INT high = d.high;
if (!uns && d.is_negative ())
{
pp_minus (pp);
high = ~high + !low;
low = -low;
}
/* Would "%x%0*x" or "%x%*0x" get zero-padding on all
systems? */
sprintf (pp_buffer (pp)->digit_buffer,
HOST_WIDE_INT_PRINT_DOUBLE_HEX,
(unsigned HOST_WIDE_INT) high, low);
pp_string (pp, pp_buffer (pp)->digit_buffer);
}
}
#if __GNUC__ >= 10
# pragma GCC diagnostic pop
#endif
|
test_kernel_SIMD.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <omp.h>
#include "H2Pack.h"
#include "H2Pack_kernels.h"
#include "parse_scalar_params.h"
#include "direct_nbody.h"
static void Gaussian_3D_eval_std_d(KRNL_EVAL_PARAM)
{
EXTRACT_3D_COORD();
const DTYPE *param_ = (DTYPE*) param;
const DTYPE l = param_[0];
for (int i = 0; i < n0; i++)
{
DTYPE *mat_irow = mat + i * ldm;
const DTYPE x0_i = x0[i];
const DTYPE y0_i = y0[i];
const DTYPE z0_i = z0[i];
//#pragma novector
#pragma omp simd
for (int j = 0; j < n1; j++)
{
DTYPE dx = x0_i - x1[j];
DTYPE dy = y0_i - y1[j];
DTYPE dz = z0_i - z1[j];
DTYPE r2 = dx * dx + dy * dy + dz * dz;
mat_irow[j] = exp(-l * r2);
}
}
}
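/* Bi-matvec kernel for the same Gaussian kernel K(i,j) = exp(-l*r2):
it accumulates x_out_0 += K * x_in_0 and x_out_1 += K^T * x_in_1 in a
single pass over the block, processing two rows of K per iteration so
each kernel value is reused. This assumes n0 is even (H2Pack is
expected to pad kernel blocks; that padding is an assumption here,
not something this file checks). */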
static void Gaussian_3D_bimv_std_d(KRNL_BIMV_PARAM)
{
EXTRACT_3D_COORD();
const DTYPE *param_ = (DTYPE*) param;
const DTYPE l = param_[0];
for (int i = 0; i < n0; i += 2)
{
const DTYPE x0_i0 = x0[i];
const DTYPE y0_i0 = y0[i];
const DTYPE z0_i0 = z0[i];
const DTYPE x0_i1 = x0[i + 1];
const DTYPE y0_i1 = y0[i + 1];
const DTYPE z0_i1 = z0[i + 1];
const DTYPE xin1_i0 = x_in_1[i];
const DTYPE xin1_i1 = x_in_1[i + 1];
DTYPE sum_i0 = 0.0, sum_i1 = 0.0;
//#pragma novector
#pragma omp simd
for (int j = 0; j < n1; j++)
{
DTYPE d0, d1, r20, r21;
d0 = x0_i0 - x1[j];
d1 = x0_i1 - x1[j];
r20 = d0 * d0;
r21 = d1 * d1;
d0 = y0_i0 - y1[j];
d1 = y0_i1 - y1[j];
r20 += d0 * d0;
r21 += d1 * d1;
d0 = z0_i0 - z1[j];
d1 = z0_i1 - z1[j];
r20 += d0 * d0;
r21 += d1 * d1;
r20 = exp(-l * r20);
r21 = exp(-l * r21);
sum_i0 += r20 * x_in_0[j];
sum_i1 += r21 * x_in_0[j];
x_out_1[j] += (r20 * xin1_i0 + r21 * xin1_i1);
}
x_out_0[i] += sum_i0;
x_out_0[i+1] += sum_i1;
}
}
int main(int argc, char **argv)
{
//__itt_pause();
srand48(time(NULL));
parse_scalar_params(argc, argv);
test_params.krnl_eval = Gaussian_3D_eval_std_d;
test_params.krnl_bimv = Gaussian_3D_bimv_std_d;
double st, et;
H2Pack_p h2pack;
H2P_init(&h2pack, test_params.pt_dim, test_params.krnl_dim, QR_REL_NRM, &test_params.rel_tol);
H2P_calc_enclosing_box(test_params.pt_dim, test_params.n_point, test_params.coord, test_params.pp_fname, &h2pack->root_enbox);
int max_leaf_points = 0;
DTYPE max_leaf_size = 0.0;
H2P_partition_points(h2pack, test_params.n_point, test_params.coord, max_leaf_points, max_leaf_size);
H2P_dense_mat_p *pp;
st = get_wtime_sec();
H2P_generate_proxy_point_ID_file(
h2pack, test_params.krnl_param, test_params.krnl_eval,
test_params.pp_fname, &pp
);
et = get_wtime_sec();
printf("H2Pack load/generate proxy points used %.3lf (s)\n", et - st);
H2P_build(
h2pack, pp, test_params.BD_JIT, test_params.krnl_param,
test_params.krnl_eval, test_params.krnl_bimv, test_params.krnl_bimv_flops
);
int n_check_pt = 50000, check_pt_s;
if (n_check_pt >= test_params.n_point)
{
n_check_pt = test_params.n_point;
check_pt_s = 0;
} else {
srand(time(NULL));
check_pt_s = rand() % (test_params.n_point - n_check_pt);
}
printf("Calculating direct n-body reference result for points %d -> %d\n", check_pt_s, check_pt_s + n_check_pt - 1);
DTYPE *x, *y0, *y1;
x = (DTYPE*) malloc(sizeof(DTYPE) * test_params.krnl_mat_size);
y0 = (DTYPE*) malloc(sizeof(DTYPE) * test_params.krnl_dim * n_check_pt);
y1 = (DTYPE*) malloc(sizeof(DTYPE) * test_params.krnl_mat_size);
assert(x != NULL && y0 != NULL && y1 != NULL);
for (int i = 0; i < test_params.krnl_mat_size; i++)
{
//x[i] = (DTYPE) pseudo_randn();
x[i] = (DTYPE) drand48() - 0.5;
}
// Get reference results
direct_nbody(
test_params.krnl_param, test_params.krnl_eval, test_params.pt_dim, test_params.krnl_dim,
test_params.coord, test_params.n_point, test_params.n_point, x,
test_params.coord + check_pt_s, test_params.n_point, n_check_pt, y0
);
// Warm up, reset timers, and test the matvec performance
H2P_matvec(h2pack, x, y1);
H2P_reset_timers(h2pack);
for (int i = 0; i < 10; i++)
H2P_matvec(h2pack, x, y1);
H2P_print_statistic(h2pack);
// Verify H2 matvec results
DTYPE y0_norm = 0.0, err_norm = 0.0;
for (int i = 0; i < test_params.krnl_dim * n_check_pt; i++)
{
DTYPE diff = y1[test_params.krnl_dim * check_pt_s + i] - y0[i];
y0_norm += y0[i] * y0[i];
err_norm += diff * diff;
}
y0_norm = DSQRT(y0_norm);
err_norm = DSQRT(err_norm);
printf("For %d validation points: ||y_{H2} - y||_2 / ||y||_2 = %e\n", n_check_pt, err_norm / y0_norm);
free(x);
free(y0);
free(y1);
free_aligned(test_params.coord);
H2P_destroy(&h2pack);
return 0;
}
|
avx_mandelbrot.h | #pragma once
// credit: https://github.com/skeeto/mandel-simd/
#ifdef __AVX2__
#define USING_AVX
#include <cstdint>
#include <cstddef>
#include <x86intrin.h>
#include "mandel.h"
void
mandel_avx(uint8_t *image, const struct spec *s)
{
__m256 xmin = _mm256_set1_ps(s->xlim[0]);
__m256 ymin = _mm256_set1_ps(s->ylim[0]);
__m256 xscale = _mm256_set1_ps((s->xlim[1] - s->xlim[0]) / s->width);
__m256 yscale = _mm256_set1_ps((s->ylim[1] - s->ylim[0]) / s->height);
__m256 threshold = _mm256_set1_ps(4);
__m256 one = _mm256_set1_ps(1);
__m256 iter_scale = _mm256_set1_ps(1.0f / s->iterations);
__m256 depth_scale = _mm256_set1_ps(s->depth - 1);
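/* Each __m256 lane tracks one pixel, so the x loop below advances eight
columns at a time. mk counts, per lane, how many iterations |z|^2
stayed below the escape threshold; the masked add in the inner loop
stops counting once a lane escapes. */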
#pragma omp parallel for schedule(dynamic, 1)
for (int y = 0; y < s->height; y++) {
for (int x = 0; x < s->width; x += 8) {
__m256 mx = _mm256_set_ps(x + 7, x + 6, x + 5, x + 4,
x + 3, x + 2, x + 1, x + 0);
__m256 my = _mm256_set1_ps(y);
__m256 cr = _mm256_add_ps(_mm256_mul_ps(mx, xscale), xmin);
__m256 ci = _mm256_add_ps(_mm256_mul_ps(my, yscale), ymin);
__m256 zr = cr;
__m256 zi = ci;
int k = 1;
__m256 mk = _mm256_set1_ps(k);
while (++k < s->iterations) {
/* Compute z1 from z0 */
__m256 zr2 = _mm256_mul_ps(zr, zr);
__m256 zi2 = _mm256_mul_ps(zi, zi);
__m256 zrzi = _mm256_mul_ps(zr, zi);
/* zr1 = zr0 * zr0 - zi0 * zi0 + cr */
/* zi1 = 2 * zr0 * zi0 + ci */
zr = _mm256_add_ps(_mm256_sub_ps(zr2, zi2), cr);
zi = _mm256_add_ps(_mm256_add_ps(zrzi, zrzi), ci);
/* Count this iteration in mk for every lane still inside the threshold */
zr2 = _mm256_mul_ps(zr, zr);
zi2 = _mm256_mul_ps(zi, zi);
__m256 mag2 = _mm256_add_ps(zr2, zi2);
__m256 mask = _mm256_cmp_ps(mag2, threshold, _CMP_LT_OS);
mk = _mm256_add_ps(_mm256_and_ps(mask, one), mk);
/* Early bailout */
if (_mm256_testz_ps(mask, _mm256_set1_ps(-1)))
break;
}
mk = _mm256_mul_ps(mk, iter_scale);
mk = _mm256_sqrt_ps(mk);
mk = _mm256_mul_ps(mk, depth_scale);
__m256i pixels = _mm256_cvtps_epi32(mk);
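/* Each 32-bit lane of pixels now holds a shade in [0, depth); take its
low byte and replicate it into R, G and B for a grayscale image.
This assumes s->depth <= 256 so the shade fits in one byte. */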
uint8_t *dst = image + y * s->width * 3 + x * 3;
uint8_t *src = (uint8_t *)&pixels;
for (int i = 0; i < 8; i++) {
dst[i * 3 + 0] = src[i * 4];
dst[i * 3 + 1] = src[i * 4];
dst[i * 3 + 2] = src[i * 4];
}
}
}
}
#endif
|
blurim.c | #include <stdlib.h>
#include "blurim.h"
void blurim(float* v,int M,int N,float*output){
#pragma omp parallel for
for (int H4 = 0; H4 < N; H4++) {
for (int H5 = 0; H5 < M; H5++) {
float tmp2 = 0;
float tmp3 = 0;
if (0 <= H4 - (1) && H4 - (1) < N) {
float tmp4 = 0;
float tmp5 = 0;
if (0 <= H5 - (1)) {
tmp5 = v[(((M)) * (H4 - (1))) + H5 - (1)];
}
float tmp6 = 0;
tmp6 = v[(((M)) * (H4 - (1))) + H5];
tmp4 = tmp5 + tmp6;
float tmp7 = 0;
if (H5 + 1 < M) {
tmp7 = v[(((M)) * (H4 - (1))) + H5 + 1];
}
tmp3 = tmp4 + tmp7;
}
float tmp8 = 0;
if (0 <= H4) {
float tmp9 = 0;
float tmp10 = 0;
if (0 <= H5 - (1)) {
tmp10 = v[(((M)) * (H4)) + H5 - (1)];
}
float tmp11 = 0;
tmp11 = v[(((M)) * (H4)) + H5];
tmp9 = tmp10 + tmp11;
float tmp12 = 0;
if (H5 + 1 < M) {
tmp12 = v[(((M)) * (H4)) + H5 + 1];
}
tmp8 = tmp9 + tmp12;
}
tmp2 = tmp3 + tmp8;
float tmp13 = 0;
if (0 <= H4 + 1 && H4 + 1 < N) {
float tmp14 = 0;
float tmp15 = 0;
if (0 <= H5 - (1)) {
tmp15 = v[(((M)) * (H4 + 1)) + H5 - (1)];
}
float tmp16 = 0;
tmp16 = v[(((M)) * (H4 + 1)) + H5];
tmp14 = tmp15 + tmp16;
float tmp17 = 0;
if (H5 + 1 < M) {
tmp17 = v[(((M)) * (H4 + 1)) + H5 + 1];
}
tmp13 = tmp14 + tmp17;
}
output[(M) * (H4) + H5] = tmp2 + tmp13;
}
}
}
|
bitshuffle_core.c | /*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include <stdio.h>
#include <string.h>
#ifdef _M_X64 // MSVC x64 always has SSE2 support, but doesn't define __SSE2__ macro
#define USESSE2
// TODO: potential for SSE2 support in MSVC x86
#elif defined(__SSE2__)
#define USESSE2
#endif
#if defined(__AVX2__) && defined(USESSE2)
#define USEAVX2
#endif
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USEARMNEON
#endif
// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#elif defined USEARMNEON
#include <arm_neon.h>
#endif
#if defined(_OPENMP) && defined(_MSC_VER)
typedef int64_t omp_size_t;
#else
typedef size_t omp_size_t;
#endif
// Macros.
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* ---- Functions indicating compile time instruction set. ---- */
int bshuf_using_NEON(void) {
#ifdef USEARMNEON
return 1;
#else
return 0;
#endif
}
int bshuf_using_SSE2(void) {
#ifdef USESSE2
return 1;
#else
return 0;
#endif
}
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
return 1;
#else
return 0;
#endif
}
/* ---- Worker code not requiring special instruction sets. ----
*
* The following code does not use any x86 specific vectorized instructions
* and should compile on any machine
*
*/
/* Transpose 8x8 bit array packed into a single quadword *x*.
* *t* is workspace. */
#define TRANS_BIT_8X8(x, t) { \
t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
x = x ^ t ^ (t << 7); \
t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
x = x ^ t ^ (t << 14); \
t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
x = x ^ t ^ (t << 28); \
}
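/* Worked example: x = 0x00000000000000FFULL packs an 8x8 bit matrix
whose first byte-row is all ones. TRANS_BIT_8X8 turns it into
0x0101010101010101ULL, i.e. bit 0 set in every byte: the row has
become a column. The three steps exchange 1x1, 2x2 and 4x4 bit
sub-blocks across the diagonal. */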
/* Transpose 8x8 bit array along the diagonal from upper right
to lower left */
#define TRANS_BIT_8X8_BE(x, t) { \
t = (x ^ (x >> 9)) & 0x0055005500550055LL; \
x = x ^ t ^ (t << 9); \
t = (x ^ (x >> 18)) & 0x0000333300003333LL; \
x = x ^ t ^ (t << 18); \
t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL; \
x = x ^ t ^ (t << 36); \
}
/* Transpose of an array of arbitrarily typed elements. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
size_t ii, jj, kk; \
const type_t* in_type = (const type_t*) in; \
type_t* out_type = (type_t*) out; \
for(ii = 0; ii + 7 < lda; ii += 8) { \
for(jj = 0; jj < ldb; jj++) { \
for(kk = 0; kk < 8; kk++) { \
out_type[jj*lda + ii + kk] = \
in_type[ii*ldb + kk * ldb + jj]; \
} \
} \
} \
for(ii = lda - lda % 8; ii < lda; ii ++) { \
for(jj = 0; jj < ldb; jj++) { \
out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
} \
} \
}
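/* TRANS_ELEM_TYPE performs an lda x ldb element transpose for the given
type: the main loop handles eight rows at a time so the compiler can
unroll the fixed-length inner loop, and the tail loop covers the
remaining lda % 8 rows one at a time. */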
/* Memory copy with bshuf call signature. For testing and profiling. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
const size_t elem_size) {
const char* in_b = (const char*) in;
char* out_b = (char*) out;
memcpy(out_b, in_b, size * elem_size);
return size * elem_size;
}
/* Transpose bytes within elements, starting partway through input. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start) {
size_t ii, jj, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(start);
if (size > start) {
// ii loop separated into 2 loops so the compiler can unroll
// the inner one.
for (ii = start; ii + 7 < size; ii += 8) {
for (jj = 0; jj < elem_size; jj++) {
for (kk = 0; kk < 8; kk++) {
out_b[jj * size + ii + kk]
= in_b[ii * elem_size + kk * elem_size + jj];
}
}
}
for (ii = size - size % 8; ii < size; ii ++) {
for (jj = 0; jj < elem_size; jj++) {
out_b[jj * size + ii] = in_b[ii * elem_size + jj];
}
}
}
return size * elem_size;
}
/* Transpose bytes within elements. */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
const size_t elem_size, const size_t start_byte) {
const uint64_t* in_b = (const uint64_t*) in;
uint8_t* out_b = (uint8_t*) out;
uint64_t x, t;
size_t ii, kk;
size_t nbyte = elem_size * size;
size_t nbyte_bitrow = nbyte / 8;
uint64_t e = 1;
const int little_endian = *(uint8_t *) &e == 1;  // runtime endianness check
// on big-endian hosts the unsigned wraparound in bit_row_skip is intentional
const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;
CHECK_MULT_EIGHT(nbyte);
CHECK_MULT_EIGHT(start_byte);
for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
x = in_b[ii];
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
for (kk = 0; kk < 8; kk ++) {
out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
x = x >> 8;
}
}
return size * elem_size;
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}
/* General transpose of an array, optimized for large element sizes. */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
const size_t ldb, const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
for(ii = 0; ii < lda; ii++) {
for(jj = 0; jj < ldb; jj++) {
memcpy(&out_b[(jj*lda + ii) * elem_size],
&in_b[(ii*ldb + jj) * elem_size], elem_size);
}
}
return lda * ldb * elem_size;
}
/* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t nbyte_bitrow = size / 8;
CHECK_MULT_EIGHT(size);
return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
void *tmp_buf;
CHECK_MULT_EIGHT(size);
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
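/* Pipeline sketch (illustrative): one block is bitshuffled in three passes.
 * First bshuf_trans_byte_elem_scal gathers byte k of every element into a
 * contiguous row (a byte-level transpose of the size x elem_size matrix).
 * Then bshuf_trans_bit_byte_scal transposes the bits within each group of
 * eight bytes, producing one row per bit position. Finally
 * bshuf_trans_bitrow_eight reorders those bit-rows into the output layout.
 * bshuf_untrans_bit_elem_scal below applies the inverse passes in reverse
 * order. */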
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj, kk, nbyte_row;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
nbyte_row = size / 8;
CHECK_MULT_EIGHT(size);
for (jj = 0; jj < elem_size; jj++) {
for (ii = 0; ii < nbyte_row; ii++) {
for (kk = 0; kk < 8; kk++) {
out_b[ii * 8 * elem_size + jj * 8 + kk] = \
in_b[(jj * 8 + kk) * nbyte_row + ii];
}
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
const size_t size, const size_t elem_size) {
const char *in_b;
char *out_b;
uint64_t x, t;
size_t ii, jj, kk;
size_t nbyte, out_index;
uint64_t e=1;
const int little_endian = *(uint8_t *) &e == 1;
const size_t elem_skip = little_endian ? elem_size : -elem_size;
const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;
CHECK_MULT_EIGHT(size);
in_b = (const char*) in;
out_b = (char*) out;
nbyte = elem_size * size;
for (jj = 0; jj < 8 * elem_size; jj += 8) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
x = *((uint64_t*) &in_b[ii + jj]);
if (little_endian) {
TRANS_BIT_8X8(x, t);
} else {
TRANS_BIT_8X8_BE(x, t);
}
for (kk = 0; kk < 8; kk++) {
out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
*((uint8_t*) &out_b[out_index]) = x;
x = x >> 8;
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
void *tmp_buf;
CHECK_MULT_EIGHT(size);
tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* ---- Worker code that uses Arm NEON ----
*
* The following code makes use of the Arm NEON instruction set.
* NEON technology is the implementation of the ARM Advanced Single
* Instruction Multiple Data (SIMD) extension.
* The NEON unit is the component of the processor that executes SIMD instructions.
* It is also called the NEON Media Processing Engine (MPE).
*
*/
#ifdef USEARMNEON
/* Transpose bytes within elements for 16 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
int8x16_t a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = vld1q_s8(in_b + 2*ii + 0*16);
b0 = vld1q_s8(in_b + 2*ii + 1*16);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
a0 = vzip1q_s8(a1, b1);
b0 = vzip2q_s8(a1, b1);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
a0 = vzip1q_s8(a1, b1);
b0 = vzip2q_s8(a1, b1);
vst1q_s8(out_b + 0*size + ii, a0);
vst1q_s8(out_b + 1*size + ii, b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
int8x16_t a0, b0, c0, d0, a1, b1, c1, d1;
int64x2_t a2, b2, c2, d2;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = vld1q_s8(in_b + 4*ii + 0*16);
b0 = vld1q_s8(in_b + 4*ii + 1*16);
c0 = vld1q_s8(in_b + 4*ii + 2*16);
d0 = vld1q_s8(in_b + 4*ii + 3*16);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
c1 = vzip1q_s8(c0, d0);
d1 = vzip2q_s8(c0, d0);
a0 = vzip1q_s8(a1, b1);
b0 = vzip2q_s8(a1, b1);
c0 = vzip1q_s8(c1, d1);
d0 = vzip2q_s8(c1, d1);
a1 = vzip1q_s8(a0, b0);
b1 = vzip2q_s8(a0, b0);
c1 = vzip1q_s8(c0, d0);
d1 = vzip2q_s8(c0, d0);
a2 = vzip1q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
b2 = vzip2q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
c2 = vzip1q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));
d2 = vzip2q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));
vst1q_s64((int64_t *) (out_b + 0*size + ii), a2);
vst1q_s64((int64_t *) (out_b + 1*size + ii), b2);
vst1q_s64((int64_t *) (out_b + 2*size + ii), c2);
vst1q_s64((int64_t *) (out_b + 3*size + ii), d2);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = vld1q_s8(in_b + 8*ii + 0*16);
b0 = vld1q_s8(in_b + 8*ii + 1*16);
c0 = vld1q_s8(in_b + 8*ii + 2*16);
d0 = vld1q_s8(in_b + 8*ii + 3*16);
e0 = vld1q_s8(in_b + 8*ii + 4*16);
f0 = vld1q_s8(in_b + 8*ii + 5*16);
g0 = vld1q_s8(in_b + 8*ii + 6*16);
h0 = vld1q_s8(in_b + 8*ii + 7*16);
a1 = vzip1q_s8 (a0, b0);
b1 = vzip2q_s8 (a0, b0);
c1 = vzip1q_s8 (c0, d0);
d1 = vzip2q_s8 (c0, d0);
e1 = vzip1q_s8 (e0, f0);
f1 = vzip2q_s8 (e0, f0);
g1 = vzip1q_s8 (g0, h0);
h1 = vzip2q_s8 (g0, h0);
a0 = vzip1q_s8 (a1, b1);
b0 = vzip2q_s8 (a1, b1);
c0 = vzip1q_s8 (c1, d1);
d0 = vzip2q_s8 (c1, d1);
e0 = vzip1q_s8 (e1, f1);
f0 = vzip2q_s8 (e1, f1);
g0 = vzip1q_s8 (g1, h1);
h0 = vzip2q_s8 (g1, h1);
a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
a0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
b0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
c0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
d0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
e0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
f0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
g0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
h0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
vst1q_s8(out_b + 0*size + ii, a0);
vst1q_s8(out_b + 1*size + ii, b0);
vst1q_s8(out_b + 2*size + ii, c0);
vst1q_s8(out_b + 3*size + ii, d0);
vst1q_s8(out_b + 4*size + ii, e0);
vst1q_s8(out_b + 5*size + ii, f0);
vst1q_s8(out_b + 6*size + ii, g0);
vst1q_s8(out_b + 7*size + ii, h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best NEON algorithm available. */
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_NEON_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_NEON_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_NEON_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_NEON_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_NEON_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
count = bshuf_trans_byte_elem_NEON_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Creates a mask from the most significant bit of each byte of 'input'
 * (NEON counterpart of the SSE2 _mm_movemask_epi8 intrinsic).
 */
int32_t move_byte_mask_neon(uint8x16_t input) {
return ( ((input[0] & 0x80) >> 7) | (((input[1] & 0x80) >> 7) << 1) | (((input[2] & 0x80) >> 7) << 2) | (((input[3] & 0x80) >> 7) << 3)
| (((input[4] & 0x80) >> 7) << 4) | (((input[5] & 0x80) >> 7) << 5) | (((input[6] & 0x80) >> 7) << 6) | (((input[7] & 0x80) >> 7) << 7)
| (((input[8] & 0x80) >> 7) << 8) | (((input[9] & 0x80) >> 7) << 9) | (((input[10] & 0x80) >> 7) << 10) | (((input[11] & 0x80) >> 7) << 11)
| (((input[12] & 0x80) >> 7) << 12) | (((input[13] & 0x80) >> 7) << 13) | (((input[14] & 0x80) >> 7) << 14) | (((input[15] & 0x80) >> 7) << 15)
);
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
int16x8_t xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = vld1q_s16((int16_t *) (in_b + ii));
for (kk = 0; kk < 8; kk++) {
bt = move_byte_mask_neon((uint8x16_t) xmm);
xmm = vshlq_n_s16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_NEON(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_NEON(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
int64x1_t *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = vld1q_s8(in_b + (ii + 0)*nbyte_row + jj);
b0 = vld1q_s8(in_b + (ii + 1)*nbyte_row + jj);
c0 = vld1q_s8(in_b + (ii + 2)*nbyte_row + jj);
d0 = vld1q_s8(in_b + (ii + 3)*nbyte_row + jj);
e0 = vld1q_s8(in_b + (ii + 4)*nbyte_row + jj);
f0 = vld1q_s8(in_b + (ii + 5)*nbyte_row + jj);
g0 = vld1q_s8(in_b + (ii + 6)*nbyte_row + jj);
h0 = vld1q_s8(in_b + (ii + 7)*nbyte_row + jj);
a1 = vzip1q_s8(a0, b0);
b1 = vzip1q_s8(c0, d0);
c1 = vzip1q_s8(e0, f0);
d1 = vzip1q_s8(g0, h0);
e1 = vzip2q_s8(a0, b0);
f1 = vzip2q_s8(c0, d0);
g1 = vzip2q_s8(e0, f0);
h1 = vzip2q_s8(g0, h0);
a0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
b0= (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
c0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
d0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
e0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
f0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
g0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
h0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
as = (int64x1_t *) &a1;
bs = (int64x1_t *) &b1;
cs = (int64x1_t *) &c1;
ds = (int64x1_t *) &d1;
es = (int64x1_t *) &e1;
fs = (int64x1_t *) &f1;
gs = (int64x1_t *) &g1;
hs = (int64x1_t *) &h1;
vst1_s64((int64_t *)(out_b + (jj + 0) * nrows + ii), *as);
vst1_s64((int64_t *)(out_b + (jj + 1) * nrows + ii), *(as + 1));
vst1_s64((int64_t *)(out_b + (jj + 2) * nrows + ii), *bs);
vst1_s64((int64_t *)(out_b + (jj + 3) * nrows + ii), *(bs + 1));
vst1_s64((int64_t *)(out_b + (jj + 4) * nrows + ii), *cs);
vst1_s64((int64_t *)(out_b + (jj + 5) * nrows + ii), *(cs + 1));
vst1_s64((int64_t *)(out_b + (jj + 6) * nrows + ii), *ds);
vst1_s64((int64_t *)(out_b + (jj + 7) * nrows + ii), *(ds + 1));
vst1_s64((int64_t *)(out_b + (jj + 8) * nrows + ii), *es);
vst1_s64((int64_t *)(out_b + (jj + 9) * nrows + ii), *(es + 1));
vst1_s64((int64_t *)(out_b + (jj + 10) * nrows + ii), *fs);
vst1_s64((int64_t *)(out_b + (jj + 11) * nrows + ii), *(fs + 1));
vst1_s64((int64_t *)(out_b + (jj + 12) * nrows + ii), *gs);
vst1_s64((int64_t *)(out_b + (jj + 13) * nrows + ii), *(gs + 1));
vst1_s64((int64_t *)(out_b + (jj + 14) * nrows + ii), *hs);
vst1_s64((int64_t *)(out_b + (jj + 15) * nrows + ii), *(hs + 1));
}
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that it is
// safe to call with in == out (in-place).
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
int16x8_t xmm;
int32_t bt;
if (elem_size % 2) {
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = vld1q_s16((int16_t *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = move_byte_mask_neon((uint8x16_t) xmm);
xmm = vshlq_n_s16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_NEON(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_NEON(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEARMNEON
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
#endif // #ifdef USEARMNEON
/* ---- Worker code that uses SSE2 ----
*
* The following code makes use of the SSE2 instruction set and specialized
* 16 byte registers. The SSE2 instructions are present on modern x86
* processors. The first Intel processor microarchitecture supporting SSE2 was
* Pentium 4 (2000).
*
*/
#ifdef USESSE2
/* Transpose bytes within elements for 16 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b;
char *out_b;
in_b = (const char*) in;
out_b = (char*) out;
__m128i a0, b0, c0, d0, a1, b1, c1, d1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi64(a1, c1);
b0 = _mm_unpackhi_epi64(a1, c1);
c0 = _mm_unpacklo_epi64(b1, d1);
d0 = _mm_unpackhi_epi64(b1, d1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
e1 = _mm_unpacklo_epi8(e0, f0);
f1 = _mm_unpackhi_epi8(e0, f0);
g1 = _mm_unpacklo_epi8(g0, h0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
e0 = _mm_unpacklo_epi8(e1, f1);
f0 = _mm_unpackhi_epi8(e1, f1);
g0 = _mm_unpacklo_epi8(g1, h1);
h0 = _mm_unpackhi_epi8(g1, h1);
a1 = _mm_unpacklo_epi32(a0, c0);
b1 = _mm_unpackhi_epi32(a0, c0);
c1 = _mm_unpacklo_epi32(b0, d0);
d1 = _mm_unpackhi_epi32(b0, d0);
e1 = _mm_unpacklo_epi32(e0, g0);
f1 = _mm_unpackhi_epi32(e0, g0);
g1 = _mm_unpacklo_epi32(f0, h0);
h1 = _mm_unpackhi_epi32(f0, h0);
a0 = _mm_unpacklo_epi64(a1, e1);
b0 = _mm_unpackhi_epi64(a1, e1);
c0 = _mm_unpacklo_epi64(b1, f1);
d0 = _mm_unpackhi_epi64(b1, f1);
e0 = _mm_unpacklo_epi64(c1, g1);
f0 = _mm_unpackhi_epi64(c1, g1);
g0 = _mm_unpacklo_epi64(d1, h1);
h0 = _mm_unpackhi_epi64(d1, h1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
_mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
_mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
_mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
_mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best SSE algorithm available. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_SSE_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_SSE_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_SSE_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
#ifdef _WIN32 // Windows doesn't define `int16_t`
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, __int16);
#else
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
#endif
count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
__m128i xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
__m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpacklo_epi8(c0, d0);
c1 = _mm_unpacklo_epi8(e0, f0);
d1 = _mm_unpacklo_epi8(g0, h0);
e1 = _mm_unpackhi_epi8(a0, b0);
f1 = _mm_unpackhi_epi8(c0, d0);
g1 = _mm_unpackhi_epi8(e0, f0);
h1 = _mm_unpackhi_epi8(g0, h0);
a0 = _mm_unpacklo_epi16(a1, b1);
b0 = _mm_unpacklo_epi16(c1, d1);
c0 = _mm_unpackhi_epi16(a1, b1);
d0 = _mm_unpackhi_epi16(c1, d1);
e0 = _mm_unpacklo_epi16(e1, f1);
f0 = _mm_unpacklo_epi16(g1, h1);
g0 = _mm_unpackhi_epi16(e1, f1);
h0 = _mm_unpackhi_epi16(g1, h1);
a1 = _mm_unpacklo_epi32(a0, b0);
b1 = _mm_unpackhi_epi32(a0, b0);
c1 = _mm_unpacklo_epi32(c0, d0);
d1 = _mm_unpackhi_epi32(c0, d0);
e1 = _mm_unpacklo_epi32(e0, f0);
f1 = _mm_unpackhi_epi32(e0, f0);
g1 = _mm_unpacklo_epi32(g0, h0);
h1 = _mm_unpackhi_epi32(g0, h0);
// SSE2 has a storel for integers (_mm_storel_epi64) but no storeh,
// so reinterpret as float and use the _mm_storel_pi/_mm_storeh_pi pair.
as = (__m128 *) &a1;
bs = (__m128 *) &b1;
cs = (__m128 *) &c1;
ds = (__m128 *) &d1;
es = (__m128 *) &e1;
fs = (__m128 *) &f1;
gs = (__m128 *) &g1;
hs = (__m128 *) &h1;
_mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
_mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
_mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
_mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
_mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
_mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
_mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
_mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
_mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
_mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
_mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
}
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that it is
// safe to call with in == out (in-place).
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m128i xmm;
int32_t bt;
if (elem_size % 2) {
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USESSE2
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
#endif // #ifdef USESSE2
/* ---- Worker code that uses AVX2 ----
*
* The following code makes use of the AVX2 instruction set and specialized
* 32 byte registers. The AVX2 instructions are present on newer x86
* processors. The first Intel processor microarchitecture supporting AVX2 was
* Haswell (2013).
*
*/
#ifdef USEAVX2
/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int32_t* out_i32;
size_t nbyte = elem_size * size;
int64_t count;
__m256i ymm;
int32_t bt;
for (ii = 0; ii + 31 < nbyte; ii += 32) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_i32 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 32);
return count;
}
/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
* the bytes. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t hh, ii, jj, kk, mm;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
elem_size);
__m256i ymm_0[8];
__m256i ymm_1[8];
__m256i ymm_storeage[8][4];
for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
for (ii = 0; ii + 3 < elem_size; ii += 4) {
for (hh = 0; hh < 4; hh ++) {
for (kk = 0; kk < 8; kk ++){
ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
(ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 2; kk ++){
for (mm = 0; mm < 2; mm ++){
ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
}
}
for (kk = 0; kk < 4; kk ++){
ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 8; kk ++){
ymm_storeage[kk][hh] = ymm_1[kk];
}
}
for (mm = 0; mm < 8; mm ++) {
for (kk = 0; kk < 4; kk ++){
ymm_0[kk] = ymm_storeage[mm][kk];
}
ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);
ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
}
}
}
for (ii = 0; ii < nrows; ii ++ ) {
for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that it is
// safe to call with in == out (in-place).
const char* in_b = (const char*) in;
char* out_b = (char*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m256i ymm;
int32_t bt;
if (elem_size % 4) {
return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
} else {
for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
* (int32_t *) &out_b[ind] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEAVX2
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
#endif // #ifdef USEAVX2
/* ---- Drivers selecting best instruction set at compile time. ---- */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
count = bshuf_trans_bit_elem_NEON(in, out, size, elem_size);
#else
count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
count = bshuf_untrans_bit_elem_NEON(in, out, size, elem_size);
#else
count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
/* ---- Wrappers for implementing blocking ---- */
/* Wrap a function for processing a single block to process an entire buffer in
* parallel. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
const size_t size, const size_t elem_size, size_t block_size) {
omp_size_t ii = 0;
int64_t err = 0;
int64_t count, cum_count=0;
size_t last_block_size;
size_t leftover_bytes;
size_t this_iter;
char *last_in;
char *last_out;
ioc_chain C;
ioc_init(&C, in, out);
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic, 1) \
private(count) reduction(+ : cum_count)
#endif
for (ii = 0; ii < (omp_size_t)( size / block_size ); ii ++) {
count = fun(&C, block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
last_block_size = size % block_size;
last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
if (last_block_size) {
count = fun(&C, last_block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
if (err < 0) return err;
leftover_bytes = (size % BSHUF_BLOCKED_MULT) * elem_size;
last_in = (char *) ioc_get_in(&C, &this_iter);
ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
last_out = (char *) ioc_get_out(&C, &this_iter);
ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
memcpy(last_out, last_in, leftover_bytes);
ioc_destroy(&C);
return cum_count + leftover_bytes;
}
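/* Worked example (illustrative, assuming BSHUF_BLOCKED_MULT is 8): with
 * size = 1003 elements, elem_size = 4 and block_size = 256, the parallel
 * loop shuffles 3 full blocks (768 elements). The tail of 1003 % 256 = 235
 * elements is rounded down to a multiple of 8, so 232 more elements are
 * shuffled as a partial block, and the final 1003 % 8 = 3 elements
 * (12 bytes) are memcpy'd through unshuffled. */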
/* Bitshuffle a single block. */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_trans_bit_elem(in, out, size, elem_size);
return count;
}
/* Bitunshuffle a single block. */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_untrans_bit_elem(in, out, size, elem_size);
return count;
}
/* Write a 64 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint64_t pow28 = 1 << 8;  // pow28 == 2^8 == 256, one byte per division step
for (ii = 7; ii >= 0; ii--) {
b[ii] = num % pow28;
num = num / pow28;
}
}
/* Read a 64 bit unsigned integer from a buffer in big endian order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint64_t num = 0, pow28 = 1 << 8, cp = 1;
for (ii = 7; ii >= 0; ii--) {
num += b[ii] * cp;
cp *= pow28;
}
return num;
}
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint32_t pow28 = 1 << 8;
for (ii = 3; ii >= 0; ii--) {
b[ii] = num % pow28;
num = num / pow28;
}
}
/* Read a 32 bit unsigned integer from a buffer in big endian order. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
int ii;
uint8_t* b = (uint8_t*) buf;
uint32_t num = 0, pow28 = 1 << 8, cp = 1;
for (ii = 3; ii >= 0; ii--) {
num += b[ii] * cp;
cp *= pow28;
}
return num;
}
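/* Round-trip sketch (illustrative): these helpers make header fields
 * portable across hosts of either endianness. bshuf_write_uint32_BE(buf,
 * 0x01020304) stores the bytes {0x01, 0x02, 0x03, 0x04}, and
 * bshuf_read_uint32_BE(buf) recovers 0x01020304 on any host. */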
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
size_t bshuf_default_block_size(const size_t elem_size) {
// This function needs to be absolutely stable between versions.
// Otherwise encoded data will not be decodable.
size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
// Ensure it is a required multiple.
block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK);
}
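/* Worked example (illustrative, assuming BSHUF_TARGET_BLOCK_SIZE_B = 8192,
 * BSHUF_BLOCKED_MULT = 8 and BSHUF_MIN_RECOMMEND_BLOCK = 128): for
 * elem_size = 4 this yields 8192 / 4 = 2048 elements, already a multiple
 * of 8; for elem_size = 3 it yields floor(8192 / 3) = 2730, rounded down
 * to 2728; for very large elements MAX() clamps the result up to 128. */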
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size) {
return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size,
elem_size, block_size);
}
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
const size_t elem_size, size_t block_size) {
return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size,
elem_size, block_size);
}
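/* Usage sketch (illustrative, not part of the library): round-trip a
 * buffer through the public API. `size` counts elements, not bytes, and
 * block_size = 0 selects bshuf_default_block_size().
 *
 *     #include <stdint.h>
 *     #include <stdlib.h>
 *     #include <string.h>
 *
 *     int demo(void) {
 *         const size_t size = 1024, elem_size = sizeof(uint32_t);
 *         uint32_t *in = malloc(size * elem_size);
 *         void *shuf = malloc(size * elem_size);
 *         void *back = malloc(size * elem_size);
 *         if (!in || !shuf || !back) return -1;
 *         for (size_t i = 0; i < size; i++) in[i] = (uint32_t) i;
 *         int64_t c1 = bshuf_bitshuffle(in, shuf, size, elem_size, 0);
 *         int64_t c2 = bshuf_bitunshuffle(shuf, back, size, elem_size, 0);
 *         int ok = c1 >= 0 && c2 >= 0 &&
 *                  memcmp(in, back, size * elem_size) == 0;
 *         free(in); free(shuf); free(back);
 *         return ok ? 0 : -1;
 *     }
 */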
#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE
#undef USESSE2
#undef USEAVX2
|
MatrixOperations.h | // Copyright (c) 2004-2022 Tomáš Oberhuber et al.
//
// This file is part of TNL - Template Numerical Library (https://tnl-project.org/)
//
// SPDX-License-Identifier: MIT
// Implemented by: Jakub Klinkovsky
#pragma once
/*
* TODO: This is just a temporary file, used only in the CWYGMRES solver.
* The algorithms should be incorporated into the Matrices::Dense class.
*/
#include <memory> // std::unique_ptr
#include <noa/3rdparty/tnl-noa/src/TNL/Exceptions/CudaSupportMissing.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Devices/Host.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Devices/Cuda.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Math.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Cuda/DeviceInfo.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Cuda/SharedMemory.h>
namespace noa::TNL {
namespace Matrices {
template< typename DeviceType = Devices::Host >
class MatrixOperations
{
public:
/*
* This function performs the matrix-vector multiplication
* y = alpha * A * x + beta * y
* where:
* alpha and beta are scalars,
* A is an (lda by n) matrix stored in column-major format,
* lda >= m is the leading dimension of two-dimensional array used to store matrix A,
* x is a vector of n elements,
* y is a vector of m elements.
*
* It is assumed that n is much smaller than m.
*/
template< typename RealType, typename IndexType >
static void
gemv( const IndexType m,
const IndexType n,
const RealType alpha,
const RealType* A,
const IndexType lda,
const RealType* x,
const RealType beta,
RealType* y )
{
TNL_ASSERT_GT( m, 0, "m must be positive" );
TNL_ASSERT_GT( n, 0, "n must be positive" );
TNL_ASSERT_GE( lda, m, "lda must be at least m" );
std::unique_ptr< RealType[] > alphax{ new RealType[ n ] };
for( IndexType k = 0; k < n; k++ )
alphax[ k ] = alpha * x[ k ];
if( n == 1 ) {
if( beta != 0.0 ) {
#ifdef HAVE_OPENMP
#pragma omp parallel for if( TNL::Devices::Host::isOMPEnabled() )
#endif
for( IndexType j = 0; j < m; j++ )
y[ j ] = A[ j ] * alphax[ 0 ] + beta * y[ j ];
}
else {
// the vector y might be uninitialized, and 0.0 * NaN = NaN
#ifdef HAVE_OPENMP
#pragma omp parallel for if( TNL::Devices::Host::isOMPEnabled() )
#endif
for( IndexType j = 0; j < m; j++ )
y[ j ] = A[ j ] * alphax[ 0 ];
}
}
else {
// the matrix A should be accessed column-wise, so we split the work into
// small blocks and process each block by columns, either in parallel or serially
constexpr IndexType block_size = 128;
const IndexType blocks = m / block_size;
#ifdef HAVE_OPENMP
#pragma omp parallel if( TNL::Devices::Host::isOMPEnabled() && blocks >= 2 )
#endif
{
RealType aux[ block_size ];
#ifdef HAVE_OPENMP
#pragma omp for nowait
#endif
for( IndexType b = 0; b < blocks; b++ ) {
const IndexType block_offset = b * block_size;
// initialize array for thread-local results
for( IndexType j = 0; j < block_size; j++ )
aux[ j ] = 0.0;
// compute aux = A * alphax
for( IndexType k = 0; k < n; k++ ) {
const IndexType offset = block_offset + k * lda;
for( IndexType j = 0; j < block_size; j++ )
aux[ j ] += A[ offset + j ] * alphax[ k ];
}
// write result: y = aux + beta * y
if( beta != 0.0 ) {
for( IndexType j = 0; j < block_size; j++ )
y[ block_offset + j ] = aux[ j ] + beta * y[ block_offset + j ];
}
else {
// the vector y might be uninitialized, and 0.0 * NaN = NaN
for( IndexType j = 0; j < block_size; j++ )
y[ block_offset + j ] = aux[ j ];
}
}
// the first thread that reaches here processes the last, incomplete block
#ifdef HAVE_OPENMP
#pragma omp single nowait
#endif
{
// TODO: unlike the complete blocks, the tail is traversed row-wise
if( beta != 0.0 ) {
for( IndexType j = blocks * block_size; j < m; j++ ) {
RealType tmp = 0.0;
for( IndexType k = 0; k < n; k++ )
tmp += A[ j + k * lda ] * alphax[ k ];
y[ j ] = tmp + beta * y[ j ];
}
}
else {
// the vector y might be uninitialized, and 0.0 * NaN = NaN
for( IndexType j = blocks * block_size; j < m; j++ ) {
RealType tmp = 0.0;
for( IndexType k = 0; k < n; k++ )
tmp += A[ j + k * lda ] * alphax[ k ];
y[ j ] = tmp;
}
}
}
}
}
}
/*
* This function performs the matrix-matrix addition
* C = alpha * A + beta * B
* where:
* alpha and beta are scalars,
* A, B, C are (m by n) matrices stored in column-major format on Devices::Cuda,
* lda, ldb, ldc (all >= m) are the leading dimensions of matrices A, B, C,
* respectively.
*
* It is assumed that n is much smaller than m.
*/
template< typename RealType, typename IndexType >
static void
geam( const IndexType m,
const IndexType n,
const RealType alpha,
const RealType* A,
const IndexType lda,
const RealType beta,
const RealType* B,
const IndexType ldb,
RealType* C,
const IndexType ldc )
{
TNL_ASSERT_GT( m, 0, "m must be positive" );
TNL_ASSERT_GT( n, 0, "n must be positive" );
TNL_ASSERT_GE( lda, m, "lda must be at least m" );
TNL_ASSERT_GE( ldb, m, "ldb must be at least m" );
TNL_ASSERT_GE( ldc, m, "ldc must be at least m" );
if( n == 1 ) {
#ifdef HAVE_OPENMP
#pragma omp parallel for if( TNL::Devices::Host::isOMPEnabled() )
#endif
for( IndexType j = 0; j < m; j++ )
C[ j ] = alpha * A[ j ] + beta * B[ j ];
}
else {
// all matrices should be accessed column-wise, so we split the work into
// small blocks and process each block by columns, either in parallel or serially
constexpr IndexType block_size = 128;
const IndexType blocks = m / block_size;
#ifdef HAVE_OPENMP
#pragma omp parallel if( TNL::Devices::Host::isOMPEnabled() && blocks >= 2 )
#endif
{
#ifdef HAVE_OPENMP
#pragma omp for nowait
#endif
for( IndexType b = 0; b < blocks; b++ ) {
const IndexType block_offset = b * block_size;
for( IndexType j = 0; j < n; j++ ) {
const IndexType offset_A = j * lda + block_offset;
const IndexType offset_B = j * ldb + block_offset;
const IndexType offset_C = j * ldc + block_offset;
for( IndexType i = 0; i < block_size; i++ )
C[ offset_C + i ] = alpha * A[ offset_A + i ] + beta * B[ offset_B + i ];
}
}
// the first thread that reaches here processes the last, incomplete block
#ifdef HAVE_OPENMP
#pragma omp single nowait
#endif
{
for( IndexType j = 0; j < n; j++ ) {
const IndexType offset_A = j * lda;
const IndexType offset_B = j * ldb;
const IndexType offset_C = j * ldc;
for( IndexType i = blocks * block_size; i < m; i++ )
C[ offset_C + i ] = alpha * A[ offset_A + i ] + beta * B[ offset_B + i ];
}
}
}
}
}
};
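/*
 * Usage sketch (illustrative; the names below are local to the example):
 * compute y = 2 * A * x with beta = 0 for a tall column-major matrix on
 * the host.
 *
 *     const int m = 1000, n = 4, lda = m;
 *     std::vector< double > A( lda * n, 1.0 ), x( n, 1.0 ), y( m );
 *     MatrixOperations< Devices::Host >::gemv( m, n, 2.0, A.data(), lda,
 *                                              x.data(), 0.0, y.data() );
 *     // each y[j] = 2 * sum_k A[j + k*lda] * x[k] = 8.0
 */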
// CUDA kernels
#ifdef HAVE_CUDA
template< typename RealType, typename IndexType >
__global__
void
GemvCudaKernel( const IndexType m,
const IndexType n,
const RealType alpha,
const RealType* A,
const IndexType lda,
const RealType* x,
const RealType beta,
RealType* y )
{
IndexType elementIdx = blockIdx.x * blockDim.x + threadIdx.x;
const IndexType gridSize = blockDim.x * gridDim.x;
RealType* shx = Cuda::getSharedMemory< RealType >();
if( threadIdx.x < n )
shx[ threadIdx.x ] = alpha * x[ threadIdx.x ];
__syncthreads();
if( beta != 0.0 ) {
while( elementIdx < m ) {
RealType tmp = 0.0;
for( IndexType k = 0; k < n; k++ )
tmp += A[ elementIdx + k * lda ] * shx[ k ];
y[ elementIdx ] = tmp + beta * y[ elementIdx ];
elementIdx += gridSize;
}
}
else {
// the vector y might be uninitialized, and 0.0 * NaN = NaN
while( elementIdx < m ) {
RealType tmp = 0.0;
for( IndexType k = 0; k < n; k++ )
tmp += A[ elementIdx + k * lda ] * shx[ k ];
y[ elementIdx ] = tmp;
elementIdx += gridSize;
}
}
}
template< typename RealType, typename IndexType >
__global__
void
GeamCudaKernel( const IndexType m,
const IndexType n,
const RealType alpha,
const RealType* A,
const IndexType lda,
const RealType beta,
const RealType* B,
const IndexType ldb,
RealType* C,
const IndexType ldc )
{
IndexType x = blockIdx.x * blockDim.x + threadIdx.x;
const IndexType gridSizeX = blockDim.x * gridDim.x;
const IndexType y = blockIdx.y * blockDim.y + threadIdx.y;
const IndexType offset_A = y * lda;
const IndexType offset_B = y * ldb;
const IndexType offset_C = y * ldc;
if( y < n )
while( x < m ) {
C[ x + offset_C ] = alpha * A[ x + offset_A ] + beta * B[ x + offset_B ];
x += gridSizeX;
}
}
#endif
// specialization for CUDA
template<>
class MatrixOperations< Devices::Cuda >
{
public:
/*
* This function performs the matrix-vector multiplication
* y = alpha * A * x + beta * y
* where:
* alpha and beta are scalars,
* A is an (lda by n) matrix stored in column-major format on Devices::Cuda,
* lda >= m is the leading dimension of two-dimensional array used to store matrix A,
* x is a vector of n elements, stored on Devices::Host,
* y is a vector of m elements, stored on Devices::Cuda.
*
* It is assumed that n is much smaller than m.
*/
template< typename RealType, typename IndexType >
static void
gemv( const IndexType m,
const IndexType n,
const RealType alpha,
const RealType* A,
const IndexType lda,
const RealType* x,
const RealType beta,
RealType* y )
{
TNL_ASSERT( m <= lda, );
TNL_ASSERT( n <= 256,
std::cerr << "The gemv kernel is optimized only for small 'n' and assumes that n <= 256." << std::endl; );
#ifdef HAVE_CUDA
// TODO: use static storage, e.g. from the CudaReductionBuffer, to avoid frequent reallocations
Containers::Vector< RealType, Devices::Cuda, IndexType > xDevice;
xDevice.setSize( n );
Algorithms::MultiDeviceMemoryOperations< Devices::Cuda, Devices::Host >::copy< RealType, RealType, IndexType >(
xDevice.getData(), x, n );
// desGridSize = blocksPerMultiprocessor * numberOfMultiprocessors
const int desGridSize = 32 * Cuda::DeviceInfo::getCudaMultiprocessors( Cuda::DeviceInfo::getActiveDevice() );
dim3 blockSize, gridSize;
blockSize.x = 256;
gridSize.x = min( desGridSize, Cuda::getNumberOfBlocks( m, blockSize.x ) );
GemvCudaKernel<<< gridSize, blockSize,
n * sizeof( RealType ) >>>( m, n, alpha, A, lda, xDevice.getData(), beta, y );
TNL_CHECK_CUDA_DEVICE;
#else
throw Exceptions::CudaSupportMissing();
#endif
}
/*
* This function performs the matrix-matrix addition
* C = alpha * A + beta * B
* where:
* alpha and beta are scalars,
* A, B, C are (m by n) matrices stored in column-major format on Devices::Cuda,
* lda, ldb, ldc (all >= m) are the leading dimensions of matrices A, B, C,
* respectively.
*
* It is assumed that n is much smaller than m.
*/
template< typename RealType, typename IndexType >
static void
geam( const IndexType m,
const IndexType n,
const RealType alpha,
const RealType* A,
const IndexType lda,
const RealType beta,
const RealType* B,
const IndexType ldb,
RealType* C,
const IndexType ldc )
{
TNL_ASSERT_GT( m, 0, "m must be positive" );
TNL_ASSERT_GT( n, 0, "n must be positive" );
TNL_ASSERT_GE( lda, m, "lda must be at least m" );
TNL_ASSERT_GE( ldb, m, "ldb must be at least m" );
TNL_ASSERT_GE( ldc, m, "ldc must be at least m" );
#ifdef HAVE_CUDA
dim3 blockSize, gridSize;
// max 16 columns of threads
blockSize.y = min( n, 16 );
// max 256 threads per block, power of 2
blockSize.x = 256;
while( blockSize.x * blockSize.y > 256 )
blockSize.x /= 2;
// desGridSize = blocksPerMultiprocessor * numberOfMultiprocessors
const int desGridSize = 32 * Cuda::DeviceInfo::getCudaMultiprocessors( Cuda::DeviceInfo::getActiveDevice() );
gridSize.x = min( desGridSize, Cuda::getNumberOfBlocks( m, blockSize.x ) );
gridSize.y = Cuda::getNumberOfBlocks( n, blockSize.y );
GeamCudaKernel<<< gridSize, blockSize >>>( m, n, alpha, A, lda, beta, B, ldb, C, ldc );
TNL_CHECK_CUDA_DEVICE;
#else
throw Exceptions::CudaSupportMissing();
#endif
}
};
} // namespace Matrices
} // namespace noa::TNL
|
tm_efficientdet_uint8.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Author: zylo117
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "common.h"
#include "tengine/c_api.h"
#include "tengine_operations.h"
#define DEFAULT_IMG_H 512
#define DEFAULT_IMG_W 512
#define DEFAULT_SCALE1 0.017124754f
#define DEFAULT_SCALE2 0.017507003f
#define DEFAULT_SCALE3 0.017429194f
#define DEFAULT_MEAN1 123.675
#define DEFAULT_MEAN2 116.280
#define DEFAULT_MEAN3 103.530
#define DEFAULT_LOOP_COUNT 1
#define DEFAULT_THREAD_COUNT 1
#define DEFAULT_CPU_AFFINITY 255
typedef struct Box
{
int x0;
int y0;
int x1;
int y1;
int class_idx;
float score;
} Box_t;
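/* In-place quicksort ordering boxes by descending score; the two recursive
 * calls on the partitions run as independent OpenMP sections. */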
void qsort_descent_inplace(Box_t* boxes, int left, int right)
{
int i = left;
int j = right;
float p = boxes[(left + right) / 2].score;
while (i <= j)
{
while (boxes[i].score > p)
i++;
while (boxes[j].score < p)
j--;
if (i <= j)
{
// swap
Box_t tmp = boxes[i];
boxes[i] = boxes[j];
boxes[j] = tmp;
i++;
j--;
}
}
#pragma omp parallel sections
{
#pragma omp section
{
if (left < j) qsort_descent_inplace(boxes, left, j);
}
#pragma omp section
{
if (i < right) qsort_descent_inplace(boxes, i, right);
}
}
}
int nms(const Box_t* boxes, const int num_boxes, int* suppressed, float nms_threshold)
{
int num_outputs = num_boxes;
float* areas = malloc(num_boxes * sizeof(float));
for (int i = 0; i < num_boxes; i++)
{
areas[i] = (float)((boxes[i].x1 - boxes[i].x0) * (boxes[i].y1 - boxes[i].y0));
}
for (int i = 0; i < num_boxes; i++)
{
const Box_t a = boxes[i];
if (suppressed[i] == 1)
continue;
for (int j = i + 1; j < num_boxes; j++)
{
const Box_t b = boxes[j];
if (suppressed[j] == 1)
continue;
// iou
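            // IoU = intersection / (area(a) + area(b) - intersection);
            // the fmaxf clamps handle non-overlapping boxes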
            float intersection = fmaxf(fminf(a.x1, b.x1) - fmaxf(a.x0, b.x0), 0) * fmaxf(fminf(a.y1, b.y1) - fmaxf(a.y0, b.y0), 0);
float total_area = (a.x1 - a.x0) * (a.y1 - a.y0) + (b.x1 - b.x0) * (b.y1 - b.y0) - intersection;
float iou = fmaxf(intersection / total_area, 0);
if (iou > nms_threshold)
{
suppressed[j] = 1;
num_outputs--;
}
else
{
suppressed[j] = 0;
}
}
}
free(areas);
return num_outputs;
}
float* arange(int start, int end, float stride)
{
    int length = (int)ceilf((float)(end - start) / stride);
float* result = malloc(length * sizeof(float));
result[0] = (float)start;
for (int i = 1; i < length; i++)
{
result[i] = result[i - 1] + stride;
}
return result;
}
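/* tile() writes arr repeated `times` times ([a,b,c,a,b,c,...]) while
 * repeat() writes each element repeated `times` times ([a,a,b,b,c,c,...]);
 * both add `offset` to every value and scatter into `result` starting at
 * index arr_starts_from with step arr_stride. */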
void tile(const float* arr, int arr_length, int times, float offset,
float* result, int arr_starts_from, int arr_stride)
{
int length = arr_length * times;
if (result == NULL)
{
result = malloc(length * sizeof(float));
arr_starts_from = 0;
}
for (int i = 0, j = 0; i < length; i++, j += arr_stride)
{
result[j + arr_starts_from] = arr[i % arr_length] + offset;
}
}
void repeat(const float* arr, int arr_length, int times, float offset,
float* result, int arr_starts_from, int arr_stride)
{
int length = arr_length * times;
if (result == NULL)
{
result = malloc(length * sizeof(float));
arr_starts_from = 0;
}
for (int i = 0, j = 0; i < length; i++, j += arr_stride)
{
result[j + arr_starts_from] = arr[i / times] + offset;
}
}
int argmax(const float* arr, int arr_starts_from, int arr_length)
{
float max_value = arr[arr_starts_from];
int max_idx = 0;
for (int i = 1; i < arr_length; i++)
{
float this_value = arr[arr_starts_from + i];
if (this_value > max_value)
{
max_value = this_value;
max_idx = i;
}
}
return max_idx;
}
int tengine_detect(const char* model_file, const char* image_file, int img_h, int img_w, const float* mean,
const float* scale, int loop_count, int num_thread, int affinity)
{
/* setup network */
const char* CLASSES_NAME[] = {"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep",
"cow", "elephant", "bear", "zebra", "giraffe", "", "backpack", "umbrella", "", "", "handbag", "tie",
"suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
"skateboard", "surfboard", "tennis racket", "bottle", "", "wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut",
"cake", "chair", "couch", "potted plant", "bed", "", "dining table", "", "", "toilet", "", "tv",
"laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
"refrigerator", "", "book", "clock", "vase", "scissors", "teddy bear", "hair drier",
"toothbrush"};
int PYRAMID_LEVELS[] = {3, 4, 5, 6, 7};
int STRIDES[] = {8, 16, 32, 64, 128};
float SCALES[] = {
(float)pow(2, 0.),
(float)pow(2, 1. / 3.),
(float)pow(2, 2. / 3.),
};
float RATIOS_X[] = {1.f, 1.4f, 0.7f};
float RATIOS_Y[] = {1.f, 0.7f, 1.4f};
float ANCHOR_SCALE = 4.f;
float CONFIDENCE_THRESHOLD = 0.2f;
float NMS_THRESHOLD = 0.2f;
int num_levels = sizeof(PYRAMID_LEVELS) / sizeof(int);
int num_scales = sizeof(SCALES) / sizeof(float);
int num_ratios = sizeof(RATIOS_X) / sizeof(float);
/* set runtime options */
struct options opt;
opt.num_thread = num_thread;
opt.cluster = TENGINE_CLUSTER_ALL;
opt.precision = TENGINE_MODE_UINT8;
opt.affinity = affinity;
    /* initialize tengine */
    if (init_tengine() != 0)
    {
        fprintf(stderr, "Initialize tengine failed.\n");
return -1;
}
fprintf(stderr, "tengine-lite library version: %s\n", get_tengine_version());
/* create graph, load tengine model xxx.tmfile */
graph_t graph = create_graph(NULL, "tengine", model_file);
if (NULL == graph)
{
fprintf(stderr, "Create graph failed.\n");
return -1;
}
/* set the shape, data buffer of input_tensor of the graph */
int img_size = img_h * img_w * 3;
int dims[] = {1, 3, img_h, img_w}; // nchw
uint8_t* input_data = (uint8_t*)malloc(img_size * sizeof(uint8_t));
tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
if (input_tensor == NULL)
{
fprintf(stderr, "Get input tensor failed\n");
return -1;
}
if (set_tensor_shape(input_tensor, dims, 4) < 0)
{
fprintf(stderr, "Set input tensor shape failed\n");
return -1;
}
if (set_tensor_buffer(input_tensor, input_data, img_size) < 0)
{
fprintf(stderr, "Set input tensor buffer failed\n");
return -1;
}
/* prerun graph, set work options(num_thread, cluster, precision) */
if (prerun_graph_multithread(graph, opt) < 0)
{
fprintf(stderr, "Prerun multithread graph failed.\n");
return -1;
}
    /* prepare the input data and write it into the input tensor's buffer */
float means[3] = {mean[0], mean[1], mean[2]};
float scales[3] = {scale[0], scale[1], scale[2]};
image im = imread(image_file);
image im_vis = copy_image(im);
im = imread2caffe(im, img_w, img_h, means, scales);
int raw_h = im.h;
int raw_w = im.w;
int resized_h, resized_w;
float resize_scale;
image resImg;
if (raw_h > raw_w)
{
resized_h = img_h;
resized_w = (int)((float)img_h / raw_h * raw_w);
resImg = resize_image(im, resized_w, img_h);
resize_scale = (float)raw_h / img_h;
}
else
{
resized_w = img_w;
resized_h = (int)((float)img_w / raw_w * raw_h);
resImg = resize_image(im, img_w, resized_h);
resize_scale = (float)raw_w / img_w;
}
free_image(im);
image paddedImg = copyMaker(resImg, 0, img_h - resized_h, 0, img_w - resized_w, 0);
free_image(resImg);
// memcpy(input_data, paddedImg.data, sizeof(float) * paddedImg.c * img_w * img_h);
/* quant fp32 to uint8 */
float input_scale = 0.f;
int input_zero_point = 0;
get_tensor_quant_param(input_tensor, &input_scale, &input_zero_point, 1);
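    /* affine quantization: q = clamp(round(x / input_scale) + input_zero_point, 0, 255) */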
for (int i = 0; i < paddedImg.c * img_w * img_h; i++)
{
        int udata = (int)roundf(paddedImg.data[i] / input_scale + (float)input_zero_point);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
input_data[i] = udata;
}
free_image(paddedImg);
/* run graph */
double min_time = DBL_MAX;
double max_time = DBL_MIN;
double total_time = 0.;
for (int i = 0; i < loop_count; i++)
{
double start = get_current_time();
if (run_graph(graph, 1) < 0)
{
fprintf(stderr, "Run graph failed\n");
return -1;
}
double end = get_current_time();
double cur = end - start;
total_time += cur;
if (min_time > cur)
min_time = cur;
if (max_time < cur)
max_time = cur;
}
fprintf(stderr, "\nmodel file : %s\n", model_file);
fprintf(stderr, "image file : %s\n", image_file);
fprintf(stderr, "img_h, img_w, scale[3], mean[3] : %d %d , %.3f %.3f %.3f, %.1f %.1f %.1f\n", img_h, img_w,
scale[0], scale[1], scale[2], mean[0], mean[1], mean[2]);
fprintf(stderr, "Repeat %d times, thread %d, avg time %.2f ms, max_time %.2f ms, min_time %.2f ms\n", loop_count,
num_thread, total_time / loop_count, max_time, min_time);
fprintf(stderr, "--------------------------------------\n");
/* get the result of classification */
tensor_t output_tensor_regression = get_graph_output_tensor(graph, 0, 0);
uint8_t* output_data_regression_u8 = (uint8_t*)get_tensor_buffer(output_tensor_regression);
int num_anchors_data = get_tensor_buffer_size(output_tensor_regression);
int num_anchors = get_tensor_buffer_size(output_tensor_regression) / 4;
tensor_t output_tensor_classification = get_graph_output_tensor(graph, 1, 0);
uint8_t* output_data_classification_u8 = (uint8_t*)get_tensor_buffer(output_tensor_classification);
int num_classes_data = get_tensor_buffer_size(output_tensor_classification);
int num_classes = get_tensor_buffer_size(output_tensor_classification) / num_anchors;
/* dequant uint8 to fp32 */
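    /* affine dequantization per output tensor: x = (q - zero_point) * scale */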
float output_scale_regression = 0.f;
int output_zero_point_regression = 0;
get_tensor_quant_param(output_tensor_regression, &output_scale_regression, &output_zero_point_regression, 1);
float* output_data_regression = (float*)malloc(num_anchors_data * sizeof(float));
for (int i = 0; i < num_anchors_data; i++)
output_data_regression[i] = ((float)output_data_regression_u8[i] - (float)output_zero_point_regression) * output_scale_regression;
float output_scale_classification = 0.f;
int output_zero_point_classification = 0;
get_tensor_quant_param(output_tensor_classification, &output_scale_classification, &output_zero_point_classification, 1);
float* output_data_classification = (float*)malloc(num_classes_data * sizeof(float));
for (int i = 0; i < num_classes_data; i++)
output_data_classification[i] = ((float)output_data_classification_u8[i] - (float)output_zero_point_classification) * output_scale_classification;
// postprocess
// generate anchors
float* anchors_x0 = malloc(num_anchors * sizeof(float));
float* anchors_x1 = malloc(num_anchors * sizeof(float));
float* anchors_y0 = malloc(num_anchors * sizeof(float));
float* anchors_y1 = malloc(num_anchors * sizeof(float));
int anchor_idx = 0;
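    /* Anchors are stored grid-position-major: each grid cell of a pyramid
     * level owns num_scales * num_ratios consecutive anchor slots, so the
     * tile()/repeat() calls below scatter corner coordinates with exactly
     * that stride. */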
for (int stride_idx = 0; stride_idx < num_levels; stride_idx++)
{
int stride = STRIDES[stride_idx];
float arange_stride = powf(2, (float)PYRAMID_LEVELS[stride_idx]);
int length_x = (int)ceilf(((float)img_w - (float)stride / 2) / (float)arange_stride);
int length_y = (int)ceilf(((float)img_h - (float)stride / 2) / (float)arange_stride);
float* x = arange(stride / 2, img_w, arange_stride);
float* y = arange(stride / 2, img_h, arange_stride);
int start_idx = anchor_idx;
int num_anchor_types = num_scales * num_ratios;
for (int i = 0; i < num_scales; i++)
{
float anchor_scale = SCALES[i];
float base_anchor_size = ANCHOR_SCALE * (float)stride * anchor_scale;
for (int j = 0; j < num_ratios; j++)
{
float ratio_x = RATIOS_X[j];
float ratio_y = RATIOS_Y[j];
float anchor_size_x_2 = base_anchor_size * ratio_x / 2.f;
float anchor_size_y_2 = base_anchor_size * ratio_y / 2.f;
                // slot of the (i-th scale, j-th ratio) anchor type within each grid cell
                tile(x, length_x, length_y, -anchor_size_x_2, anchors_x0,
                     start_idx + i * num_ratios + j, num_anchor_types);
                repeat(y, length_y, length_x, -anchor_size_y_2, anchors_y0,
                       start_idx + i * num_ratios + j, num_anchor_types);
                tile(x, length_x, length_y, anchor_size_x_2, anchors_x1,
                     start_idx + i * num_ratios + j, num_anchor_types);
                repeat(y, length_y, length_x, anchor_size_y_2, anchors_y1,
                       start_idx + i * num_ratios + j, num_anchor_types);
anchor_idx += (length_x * length_y);
}
}
free(x);
free(y);
}
// loop over anchors
Box_t* proposals = malloc(sizeof(Box_t) * num_anchors);
int num_proposals_over_threshold = 0;
    #pragma omp parallel for num_threads(opt.num_thread) reduction(+ : num_proposals_over_threshold)
for (int i = 0; i < num_anchors; i++)
{
// loop over anchors
// confidence
int max_idx = argmax(output_data_classification, i * num_classes, num_classes);
float max_score = output_data_classification[i * num_classes + max_idx];
if (isinf(max_score) || max_score < CONFIDENCE_THRESHOLD)
{
proposals[i].class_idx = -1;
continue;
}
proposals[i].class_idx = max_idx;
proposals[i].score = max_score;
// box transform
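        // decode the regression output (ty, tx, th, tw) against the anchor:
        //   h = exp(th) * ha,                w = exp(tw) * wa
        //   y_center = ty * ha + y_center_a, x_center = tx * wa + x_center_a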
float ha = anchors_y1[i] - anchors_y0[i];
float wa = anchors_x1[i] - anchors_x0[i];
float y_center_a = (anchors_y1[i] + anchors_y0[i]) / 2;
float x_center_a = (anchors_x1[i] + anchors_x0[i]) / 2;
float w = expf(output_data_regression[i * 4 + 3]) * wa;
float h = expf(output_data_regression[i * 4 + 2]) * ha;
float y_center = output_data_regression[i * 4] * ha + y_center_a;
float x_center = output_data_regression[i * 4 + 1] * wa + x_center_a;
float ymin = y_center - h / 2;
float xmin = x_center - w / 2;
float ymax = y_center + h / 2;
float xmax = x_center + w / 2;
// scaling
ymin *= resize_scale;
xmin *= resize_scale;
ymax *= resize_scale;
xmax *= resize_scale;
// clipping
xmin = fmaxf(fminf(xmin, (float)(raw_w - 1)), 0.f);
xmax = fmaxf(fminf(xmax, (float)(raw_w - 1)), 0.f);
ymin = fmaxf(fminf(ymin, (float)(raw_h - 1)), 0.f);
ymax = fmaxf(fminf(ymax, (float)(raw_h - 1)), 0.f);
// area filtering
float area = (xmax - xmin) * (ymax - ymin);
if (area < 4)
{
proposals[i].class_idx = -1;
continue;
}
num_proposals_over_threshold++;
proposals[i].x0 = (int)xmin;
proposals[i].x1 = (int)xmax;
proposals[i].y0 = (int)ymin;
proposals[i].y1 = (int)ymax;
}
free(anchors_x0);
free(anchors_x1);
free(anchors_y0);
free(anchors_y1);
// filter boxes with confidence threshold
Box_t* proposals_over_threshold = malloc(sizeof(Box_t) * num_proposals_over_threshold);
int proposals_over_threshold_idx = 0;
for (int i = 0; i < num_anchors; i++)
{
Box_t box = proposals[i];
if (box.class_idx == -1)
continue;
proposals_over_threshold[proposals_over_threshold_idx] = box;
proposals_over_threshold_idx++;
}
free(proposals);
if (num_proposals_over_threshold > 0)
{
// sort boxes
qsort_descent_inplace(proposals_over_threshold, 0, num_proposals_over_threshold - 1);
// nms
int* suppressed = calloc(num_proposals_over_threshold, sizeof(int));
int num_outputs = nms(proposals_over_threshold, num_proposals_over_threshold, suppressed, NMS_THRESHOLD);
Box_t* proposals_after_nms = malloc(num_outputs * sizeof(Box_t));
int proposals_after_nms_idx = 0;
for (int i = 0; i < num_proposals_over_threshold; i++)
{
Box_t box = proposals_over_threshold[i];
if (suppressed[i] == 1)
continue;
proposals_after_nms[proposals_after_nms_idx] = box;
proposals_after_nms_idx++;
}
free(suppressed);
for (int i = 0; i < num_outputs; i++)
{
Box_t box = proposals_after_nms[i];
draw_box(im_vis, box.x0, box.y0, box.x1, box.y1, 2, 125, 0, 125);
fprintf(stderr, "%s\t:%.1f%%\n", CLASSES_NAME[box.class_idx], box.score * 100);
fprintf(stderr, "BOX:( %d , %d ),( %d , %d )\n", box.x0, box.y0, box.x1, box.y1);
}
save_image(im_vis, "efficientdet_uint8_out");
free(proposals_after_nms);
}
free(proposals_over_threshold);
/* release tengine */
free(output_data_regression);
free(output_data_classification);
free(input_data);
postrun_graph(graph);
destroy_graph(graph);
release_tengine();
return 0;
}
void show_usage()
{
fprintf(
stderr,
"[Usage]: [-h]\n [-m model_file] [-i image_file]\n [-g img_h,img_w] [-s scale[0],scale[1],scale[2]] [-w "
"mean[0],mean[1],mean[2]] [-r loop_count] [-t thread_count] [-a cpu_affinity]\n");
fprintf(
stderr,
"\nefficientdet example: \n ./classification -m /path/to/efficientdet.tmfile -i /path/to/img.jpg -g 512,512 -s "
"0.017,0.017,0.017 -w 103.53,116.28,123.675\n");
}
int main(int argc, char* argv[])
{
int loop_count = DEFAULT_LOOP_COUNT;
int num_thread = DEFAULT_THREAD_COUNT;
int cpu_affinity = DEFAULT_CPU_AFFINITY;
char* model_file = NULL;
char* image_file = NULL;
float img_hw[2] = {0.f};
int img_h = 0;
int img_w = 0;
float mean[3] = {-1.f, -1.f, -1.f};
float scale[3] = {0.f, 0.f, 0.f};
int res;
while ((res = getopt(argc, argv, "m:i:l:g:s:w:r:t:a:h")) != -1)
{
switch (res)
{
case 'm':
model_file = optarg;
break;
case 'i':
image_file = optarg;
break;
case 'g':
split(img_hw, optarg, ",");
img_h = (int)img_hw[0];
img_w = (int)img_hw[1];
break;
case 's':
split(scale, optarg, ",");
break;
case 'w':
split(mean, optarg, ",");
break;
case 'r':
loop_count = atoi(optarg);
break;
case 't':
num_thread = atoi(optarg);
break;
case 'a':
cpu_affinity = atoi(optarg);
break;
case 'h':
show_usage();
return 0;
default:
break;
}
}
/* check files */
if (model_file == NULL)
{
fprintf(stderr, "Error: Tengine model file not specified!\n");
show_usage();
return -1;
}
if (image_file == NULL)
{
fprintf(stderr, "Error: Image file not specified!\n");
show_usage();
return -1;
}
if (!check_file_exist(model_file) || !check_file_exist(image_file))
return -1;
if (img_h == 0)
{
img_h = DEFAULT_IMG_H;
fprintf(stderr, "Image height not specified, use default %d\n", img_h);
}
if (img_w == 0)
{
img_w = DEFAULT_IMG_W;
fprintf(stderr, "Image width not specified, use default %d\n", img_w);
}
if (scale[0] == 0.f || scale[1] == 0.f || scale[2] == 0.f)
{
scale[0] = DEFAULT_SCALE1;
scale[1] = DEFAULT_SCALE2;
scale[2] = DEFAULT_SCALE3;
fprintf(stderr, "Scale value not specified, use default %.3f, %.3f, %.3f\n", scale[0], scale[1], scale[2]);
}
if (mean[0] == -1.0 || mean[1] == -1.0 || mean[2] == -1.0)
{
mean[0] = DEFAULT_MEAN1;
mean[1] = DEFAULT_MEAN2;
mean[2] = DEFAULT_MEAN3;
fprintf(stderr, "Mean value not specified, use default %.1f, %.1f, %.1f\n", mean[0], mean[1], mean[2]);
}
if (tengine_detect(model_file, image_file, img_h, img_w, mean, scale, loop_count, num_thread, cpu_affinity) < 0)
return -1;
return 0;
}
|
ballAlgOMP.c | #include "../lib/msort.h"
#include "ballAlg.h"
#include "pointArith.h"
#include <stdlib.h>
extern int nDims;
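// Recursive ball-tree construction: pick two (approximately) mutually
// furthest points A and B, project every point onto the line A->B, split at
// the median projection, and recurse; while nThreads > 1 the left subtree is
// built in an OpenMP task so the two subtrees grow in parallel.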
int buildTreeOMP(double **points, long nPoints, int nThreads) {
if (nPoints == 0) return -1;
double *center = (double *) mallocSafe(sizeof(double) * nDims);
if (nPoints == 1) {
copy(points[0], center);
return newNode(center, 0, -1, -1);
}
double maxD;
const int iA = calcFurthestIdx(points, nPoints, points[0], &maxD);
const int iB = calcFurthestIdx(points, nPoints, points[iA], &maxD);
double *subBA = (double *) mallocSafe(sizeof(double) * nDims);
double *projectionsPoints = (double *) mallocSafe(sizeof(double) * nDims * nPoints);
double **projections = (double **) mallocSafe(sizeof(double *) * nPoints);
double **pointsTmp = (double **) mallocSafe(sizeof(double *) * nPoints);
sub(points[iB], points[iA], subBA);
const double squaredSubBA = innerProduct(subBA, subBA);
for (long i = 0; i < nPoints; i++) {
projections[i] = projectionsPoints + (i * nDims);
if (i == iA || i == iB) {
copy(points[i], projections[i]);
} else {
projection(points[i], points[iA], subBA, squaredSubBA, projections[i]);
}
}
msort(projections, nPoints, pointsTmp);
if (nPoints % 2 == 0) {
middle(projections[nPoints / 2 - 1], projections[nPoints / 2], center);
} else {
copy(projections[nPoints / 2], center);
}
const double radius = distance(center, points[calcFurthestIdx(points, nPoints, center, &maxD)]);
int nPointsL = 0;
int nPointsR = 0;
// Reusing Previous Allocated Arrays
double **pointsL = projections;
double **pointsR = pointsTmp;
partitionTree(projectionsPoints, center[0], points, nPoints, pointsL, &nPointsL, pointsR, &nPointsR);
pointsL = realloc(pointsL, sizeof(double *) * nPointsL);
pointsR = realloc(pointsR, sizeof(double *) * nPointsR);
free(subBA);
free(projectionsPoints);
int nidL, nidR;
#pragma omp task shared(nidL) if (nThreads > 1)
nidL = buildTreeOMP(pointsL, nPointsL, nThreads / 2);
nidR = buildTreeOMP(pointsR, nPointsR, nThreads - nThreads / 2);
#pragma omp taskwait
    // pointsL/pointsR may have been moved by realloc; free the current pointers
    free(pointsL);
    free(pointsR);
return newNode(center, radius, nidL, nidR);
}
|
critical.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
int main()
{
#pragma omp critical
{
print_current_address(1);
print_ids(0);
}
print_current_address(2);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_wait_critical: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_critical: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_critical: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
count_shared_kmer.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>
int K = 24; // kmer's k. K <= 32
//#define FASTQ
#define BUFSIZE 400000
int s_chunk=8192;
char mat[128];
unsigned long long to_code[128];
void chomp(char * s){
	int len = strlen(s);
	if(s[len-1] == '\n')
		s[len-1] = '\0';
	else
		fprintf(stderr, "strange str %s\ndoes not end with a newline (too long line? > %d)", s, BUFSIZE);
}
void reversecomplement(char * str){
int len = strlen(str);
int loop=len/2;
int i;
char tmp;
for(i=0; i<loop; ++i){
// swap
tmp = str[i];
str[i] = str[len-1-i];
str[len-1-i] = tmp;
}
for(i=0; i<len; ++i){
str[i] = mat[(int)str[i]];
}
}
unsigned long long ntuple_code(const char * str, int stt, int n){
if(n>32){
fprintf(stderr, "nruplw_code: cannot handle %d(>32)-mer\n",n);
exit(1);
}
unsigned long long ret=0;
int i;
for(i=0; i<n; ++i){
ret |= (to_code[(int)str[stt+i]] << (2*(n-i-1)));
}
return ret;
}
double gettimeofday_sec(){
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + tv.tv_usec * 1e-6;
}
void read2kmerpop(char * read, unsigned long long * bucket);
void p_num_of_sharing_kmers(char **, char **, unsigned long long **, unsigned long long **, int*, int);
int compare_ull(const void *a, const void *b);
int opt_fastq=0;
int opt_type=0;
int main(int argc, char * argv[]){
mat[(int)'A'] = 'T';
mat[(int)'a'] = 'T';
mat[(int)'C'] = 'G';
mat[(int)'c'] = 'G';
mat[(int)'G'] = 'C';
mat[(int)'g'] = 'C';
mat[(int)'T'] = 'A';
mat[(int)'t'] = 'A';
mat[(int)'N'] = 'N';
mat[(int)'n'] = 'N';
to_code[(int)'A'] = 0ull;
to_code[(int)'a'] = 0ull;
to_code[(int)'C'] = 1ull;
to_code[(int)'c'] = 1ull;
to_code[(int)'G'] = 2ull;
to_code[(int)'g'] = 2ull;
to_code[(int)'T'] = 3ull;
to_code[(int)'t'] = 3ull;
to_code[(int)'N'] = 0ull;// XXX
to_code[(int)'n'] = 0ull;
int hitnum=0;
{
int result;
while((result=getopt(argc,argv,"k:qt")) != -1){
switch(result){
case 'k':
K=atoi(optarg);
if(K > 32 || K < 1){
fprintf(stderr, "K must be 1 <= K <= 32.\n");
return 1;
}
hitnum+=2;
break;
case 'q':
opt_fastq=1;
++hitnum;
break;
case 't':
opt_type=1;
++hitnum;
break;
case '?':
printf("humei\n");
break;
default:
break;
}
}
}
	if(argc != 2+hitnum){
		fprintf(stderr, "USAGE: <this> <in.fa>\n");
		fprintf(stderr, "\t-k i: kmer's k (<=32)\n");
		fprintf(stderr, "\t-q: input is FASTQ (4 lines per record)\n");
		fprintf(stderr, "\t-t: count the number of types of shared kmers, not freq\n");
		return 1;
	}
char * in_fa = argv[1+hitnum];
FILE * fp = fopen(in_fa,"r");
if(fp == NULL){
fprintf(stderr, "cannot open %s\n", in_fa);
exit(1);
}
char **reads = (char**)malloc(sizeof(char*)*s_chunk);
if(reads == NULL){
fprintf(stderr,"cannot allocate memory: reads\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
reads[i] = (char*)malloc(sizeof(char)*BUFSIZE);
if(reads[i] == NULL){
fprintf(stderr,"cannot allocate memory: reads[%d]\n",i);
exit(1);
}
}
}
char **nls = (char**)malloc(sizeof(char*)*s_chunk);//namelines
if(nls == NULL){
fprintf(stderr,"cannot allocate memory: nls\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
nls[i] = (char*)malloc(sizeof(char)*BUFSIZE);
if(nls[i] == NULL){
fprintf(stderr,"cannot allocate memory: nls[%d]\n",i);
exit(1);
}
}
}
char *dum = (char*)malloc(sizeof(char)*BUFSIZE);
if(dum == NULL){
fprintf(stderr,"cannot allocate memory: dum\n");
exit(1);
}
unsigned long long ** buckets = (unsigned long long**)malloc(sizeof(unsigned long long*)*s_chunk);
if(buckets == NULL){
fprintf(stderr, "cannot allocate memory: buckets\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
buckets[i] = (unsigned long long*)malloc(sizeof(unsigned long long)*BUFSIZE);
if(buckets[i] == NULL){
fprintf(stderr,"cannot allocate memory: buckets[%d]\n",i);
exit(1);
}
}
}
unsigned long long ** b2 = (unsigned long long**)malloc(sizeof(unsigned long long*)*s_chunk);
if(b2 == NULL){
fprintf(stderr, "cannot allocate memory: b2\n");
exit(1);
}
{
int i;
for(i=0; i<s_chunk; ++i){
b2[i] = (unsigned long long*)malloc(sizeof(unsigned long long)*BUFSIZE);
if(b2[i] == NULL){
fprintf(stderr,"cannot allocate memory: b2[%d]\n",i);
exit(1);
}
}
}
int * n_share = (int*)malloc(sizeof(int)*s_chunk);
if(n_share == NULL){
fprintf(stderr,"cannot allocate memory: n_share\n");
exit(1);
}
int n_read=0;
while(fgets(nls[n_read],BUFSIZE,fp)!=NULL){
chomp(nls[n_read]);
fgets(reads[n_read],BUFSIZE,fp);
chomp(reads[n_read]);
if(opt_fastq){
fgets(dum,BUFSIZE,fp);// opt
fgets(dum,BUFSIZE,fp);// qvs
}
if(strlen(reads[n_read]) < K){
continue;
}
else{
++n_read;
}
if(n_read<s_chunk){
continue;
}
p_num_of_sharing_kmers(reads,nls,buckets,b2,n_share,n_read);
n_read=0;
}
p_num_of_sharing_kmers(reads,nls,buckets,b2,n_share,n_read);
fclose(fp);
{
int i;
for(i=0; i<s_chunk; ++i){
free(reads[i]);
}
}
free(reads);
{
int i;
for(i=0; i<s_chunk; ++i){
free(nls[i]);
}
}
free(nls);
free(dum);
{
int i;
for(i=0; i<s_chunk; ++i){
free(buckets[i]);
}
}
free(buckets);
{
int i;
for(i=0; i<s_chunk; ++i){
free(b2[i]);
}
}
free(b2);
free(n_share);
return 0;
}
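/* Slide a K-wide window over `read`, writing the 2-bit-packed code of every
 * k-mer into `bucket`; after the first k-mer, the code is updated
 * incrementally by shifting in the next base and masking to the low 2K bits. */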
void read2kmerpop(char * read, unsigned long long * bucket){
unsigned long long kalph = ntuple_code(read,0,K);
bucket[0] = kalph;
unsigned long long mask=1ull;
if(K<32){
mask<<=2*K;
mask -= 1ull;
}
else{
mask = 0xffffffffffffffffull;
}
int imax = strlen(read)-K;
int i;
for(i=1; i<= imax; ++i){
kalph <<= 2;
kalph &= mask;
kalph |= to_code[(int)read[K-1+i]];
bucket[i] = kalph;
}
/*
for(i=0; i<count; ++i){
if(bucket[i] > 0){
fprintf(stdout, "%d\t%d\n", i, bucket[i]);
}
}
fprintf(stdout, "%d\t%d\n", INT_MAX, INT_MAX);// as a separator
*/
return;
}
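/* For each read (in parallel), count k-mers shared between the read and its
 * reverse complement: both k-mer lists are sorted, then intersected with a
 * linear merge; with -t, a run of equal k-mers is counted only once. */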
void p_num_of_sharing_kmers(char ** reads, char ** nls, unsigned long long ** buckets, unsigned long long ** b2, int * n_share, int n_reads){
int i;
#pragma omp parallel for
for(i=0; i<n_reads; ++i){
// printf("%s\n",read);
// double stt,end;
// stt=gettimeofday_sec();
read2kmerpop(reads[i], buckets[i]);
// end=gettimeofday_sec();
// fprintf(stderr,"read2kmerpop_1: %f\n",end-stt);
reversecomplement(reads[i]);
// printf("%s\n",read);
// stt=gettimeofday_sec();
read2kmerpop(reads[i], b2[i]);
// end=gettimeofday_sec();
// fprintf(stderr,"read2kmerpop_2: %f\n",end-stt);
// stt=gettimeofday_sec();
int j,k;
int loop = strlen(reads[i])-K+1;
n_share[i]=0;
qsort(buckets[i], loop, sizeof(unsigned long long), compare_ull);
qsort(b2[i], loop, sizeof(unsigned long long), compare_ull);
/*
for(j=0; j<loop;++j){
printf("%llu,",buckets[i][j]);
}
printf("\n");
for(j=0; j<loop;++j){
printf("%llu,",b2[i][j]);
}
printf("\n");
*/
for(j=0,k=0; j<loop && k<loop;){
if(buckets[i][j] < b2[i][k]){
++j;
}
else if(buckets[i][j] > b2[i][k]){
++k;
}
else{
++n_share[i];
++j;
++k;
if(opt_type){
while(j<loop && buckets[i][j] == buckets[i][j-1]){
++j;
}
while(k<loop && b2[i][k] == b2[i][k-1]){
++k;
}
}
}
}
printf("%d\t%s\n",n_share[i],&nls[i][1]);
// end=gettimeofday_sec();
// fprintf(stderr,"kmercount: %f\n",end-stt);
}
}
int compare_ull(const void *a, const void *b){
unsigned long long foo = *(unsigned long long*)a;
unsigned long long bar = *(unsigned long long*)b;
// printf("%llu\n",foo);
// printf("%llu\n",bar);
if(foo<bar){
return -1;
}
else if(foo==bar){
return 0;
}
else{
return 1;
}
}
|
facedetectcnn.h | #include <Rcpp.h>
/*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2020, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#include "facedetection_export.h"
//#define _ENABLE_AVX512 //Please enable it if X64 CPU
//#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
FACEDETECTION_EXPORT int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) instead of an RGB image!
/*
DO NOT EDIT the following code if you don't really understand it.
*/
#if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support UINT8*INT8 dot product,
//so convert the input data to the range [0, 127]
//and then use INT8*INT8 dot product
#define _MAX_UINT8_VALUE 127
#else
#define _MAX_UINT8_VALUE 255
#endif
#if defined(_ENABLE_AVX512)
#define _MALLOC_ALIGN 512
#elif defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX512 and NEON at the same time.
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of AVX and NEON at the same time.
#endif
#if defined(_ENABLE_AVX512)&& defined(_ENABLE_AVX2)
#error Cannot enable the two of AVX512 and AVX2 at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
#include <typeinfo>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
typedef struct FaceRect_
{
float score;
int x;
int y;
int w;
int h;
int lm[10];
}FaceRect;
typedef struct ConvInfoStruct_ {
int pad;
int stride;
int kernel_size;
int channels;
int num;
float scale;
signed char* pWeights;
signed int* pBias;
}ConvInfoStruct;
template <class T>
class CDataBlob
{
public:
T * data;
int width;
int height;
int channels;
int channelStep;
float scale;
//when the datablob is a filter, the bias is 0 by default
    //if it is the filtered data, the bias is 1 by default
int bias;
public:
CDataBlob() {
data = 0;
width = 0;
height = 0;
channels = 0;
channelStep = 0;
scale = 1.0f;
bias = 0;
}
CDataBlob(int w, int h, int c)
{
data = 0;
create(w, h, c);
}
~CDataBlob()
{
setNULL();
}
void setNULL()
{
if (data)
myFree(&data);
width = height = channels = channelStep = 0;
scale = 1.0f;
}
bool create(int w, int h, int c)
{
setNULL();
width = w;
height = h;
channels = c;
bias = 0;
//alloc space for int8 array
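        //round the per-pixel channel byte count up to a multiple of
        //_MALLOC_ALIGN/8 bytes so that each pixel's channel vector is
        //aligned for SIMD loads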
int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
this->channelStep = channels * sizeof(T);
else
this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
data = (T*)myAlloc(size_t(width) * height * this->channelStep);
if (data == NULL)
{
Rcpp::Rcerr << "Failed to alloc memeory for uint8 data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
//memset(data, 0, width * height * channelStep);
//the following code is faster than memset
        //but note that only the padding bytes are set to zero.
//BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
int pixel_end = this->channelStep / sizeof(T);
T * pI = (this->data + (size_t(r) * this->width + c) * this->channelStep /sizeof(T));
for (int ch = this->channels; ch < pixel_end; ch++)
pI[ch] = 0;
}
}
return true;
}
bool setInt8FilterData(signed char * pData, int bias, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
Rcpp::Rcerr << "The input image data is null." << endl;
return false;
}
if (typeid(signed char) != typeid(T))
{
Rcpp::Rcerr << "Data must be signed char, the same with the source data." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
Rcpp::Rcerr << "The dimension of the data can not match that of the Blob." << endl;
return false;
}
for(int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
T * p = (this->data + (size_t(width) * row + col) * channelStep /sizeof(T));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
this->bias = bias;
return true;
}
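    //Rearrange the image so that a 3x3, stride-2, pad-1 convolution over it
    //becomes a 1x1, stride-1 convolution: each output pixel gathers its 3x3
    //neighborhood of 3-channel pixels into a single 27-channel pixel (an
    //im2col-style transform).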
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep)
{
if (imgData == NULL)
{
Rcpp::Rcerr << "The input image data is null." << endl;
return false;
}
if (typeid(unsigned char) != typeid(T))
{
Rcpp::Rcerr << "Data must be unsigned char, the same with the source data." << endl;
return false;
}
if (imgChannels != 3)
{
Rcpp::Rcerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
create((imgWidth+1)/2, (imgHeight+1)/2, 27);
//since the pixel assignment cannot fill all the elements in the blob.
//some elements in the blob should be initialized to 0
memset(data, 0, size_t(width) * height * channelStep);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
T * pData = (unsigned char*)this->data + (size_t(r) * this->width + c) * this->channelStep;
for (int fy = -1; fy <= 1; fy++)
{
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++)
{
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char * pImgData = imgData + size_t(imgWidthStep) * srcy + imgChannels * srcx;
int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
#if defined(_ENABLE_NEON)
pData[output_channel_offset] = (pImgData[0] / 2);
pData[output_channel_offset + 1] = (pImgData[1] / 2);
pData[output_channel_offset + 2] = (pImgData[2] / 2);
#else
pData[output_channel_offset] = (pImgData[0]);
pData[output_channel_offset+1] = (pImgData[1]);
pData[output_channel_offset+2] = (pImgData[2]);
#endif
}
}
}
}
#if defined(_ENABLE_NEON)
this->bias = 1; // 1/2 = 0
this->scale = 0.5f;
#else
this->bias = 1;
this->scale = 1.0f;
#endif
return true;
}
T getElement(int x, int y, int channel)
{
if (this->data)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
T * p = this->data + (size_t(y) * this->width + x) * this->channelStep/sizeof(T);
return (p[channel]);
}
}
return (T)(0);
}
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
{
output << "DataBlob Size (Width, Height, Channel, scale) = ("
<< dataBlob.width
<< ", " << dataBlob.height
<< ", " << dataBlob.channels
<< ", " << dataBlob.scale
<< ", " << dataBlob.bias
<< ")" << endl;
for (int ch = 0; ch < dataBlob.channels; ch++)
{
output << "Channel " << ch << ": " << endl;
for (int row = 0; row < dataBlob.height; row++)
{
output << "(";
for (int col = 0; col < dataBlob.width; col++)
{
T * p = (dataBlob.data + (dataBlob.width * row + col) * dataBlob.channelStep /sizeof(T) );
if(sizeof(T)<4)
output << (int)(p[ch]);
else
output << p[ch];
if (col != dataBlob.width - 1)
output << ", ";
}
output << ")" << endl;
}
}
return output;
}
};
class Filters {
public:
vector<CDataBlob<signed char> *> filters;
int pad;
int stride;
float scale; //element * scale = original value
Filters()
{
pad = 0;
stride = 0;
scale = 0;
}
~Filters()
{
for (int i = 0; i < (int)(filters.size()); i++)
{
delete filters[i];
filters[i] = 0;
}
}
};
bool convertInt2Float(CDataBlob<int> * inputData, CDataBlob<float> * outputData);
bool convolution(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<int> *outputData);
bool convolution_relu(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<unsigned char> *outputData);
bool maxpooling2x2S2(const CDataBlob<unsigned char> *inputData, CDataBlob<unsigned char> *outputData);
bool priorbox(const CDataBlob<unsigned char> * featureData, int img_width, int img_height, int step, int num_sizes, float * pWinSizes, CDataBlob<float> * outputData);
template<typename T>
bool concat4(const CDataBlob<T> *inputData1, const CDataBlob<T> *inputData2, const CDataBlob<T> *inputData3, const CDataBlob<T> *inputData4, CDataBlob<T> *outputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
template<typename T>
bool blob2vector(const CDataBlob<T> * inputData, CDataBlob<T> * outputData);
bool softmax1vector2class(CDataBlob<float> *inputOutputData);
bool detection_output(const CDataBlob<float> * priorbox, const CDataBlob<float> * loc, const CDataBlob<float> * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob<float> * outputData);
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int width, int height, int step);
|
soxr.c | /* SoX Resampler Library Copyright (c) 2007-18 robs@users.sourceforge.net
* Licence for this file: LGPL v2.1 See LICENCE for details. */
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "soxr.h"
#include "data-io.h"
#include "internal.h"
#if AVUTIL_FOUND
#include <libavutil/cpu.h>
#endif
#if WITH_DEV_TRACE
#include <stdarg.h>
#include <stdio.h>
int _soxr_trace_level;
void _soxr_trace(char const * fmt, ...)
{
va_list args;
va_start(args, fmt);
vfprintf(stderr, fmt, args);
fputc('\n', stderr);
va_end(args);
}
#endif
char const * soxr_version(void)
{
return "libsoxr-" SOXR_THIS_VERSION_STR;
}
typedef void sample_t; /* float or double */
typedef void (* fn_t)(void);
typedef fn_t control_block_t[10];
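/* Each resampler engine exports a table of 10 entry points; the macros below
 * dispatch through the table installed by soxr_create(), casting each slot
 * back to its real signature. */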
#define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0])
#define resampler_process (*(void (*)(void *, size_t))p->control_block[1])
#define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2])
#define resampler_flush (*(void (*)(void *))p->control_block[3])
#define resampler_close (*(void (*)(void *))p->control_block[4])
#define resampler_delay (*(double (*)(void *))p->control_block[5])
#define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6])
#define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7])
#define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8])
#define resampler_id (*(char const * (*)(void))p->control_block[9])
typedef void * resampler_t; /* For one channel. */
typedef void * resampler_shared_t; /* Between channels. */
typedef void (* deinterleave_t)(sample_t * * dest,
soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch);
typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest,
sample_t const * const * src, size_t, unsigned, unsigned long *);
struct soxr {
unsigned num_channels;
double io_ratio;
soxr_error_t error;
soxr_quality_spec_t q_spec;
soxr_io_spec_t io_spec;
soxr_runtime_spec_t runtime_spec;
void * input_fn_state;
soxr_input_fn_t input_fn;
size_t max_ilen;
resampler_shared_t shared;
resampler_t * resamplers;
control_block_t control_block;
deinterleave_t deinterleave;
interleave_t interleave;
void * * channel_ptrs;
size_t clips;
unsigned long seed;
int flushing;
};
#if WITH_CR32 || WITH_CR32S || WITH_CR64 || WITH_CR64S
#include "filter.h"
#else
#define lsx_to_3dB(x) ((x)/(x))
#endif
soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags)
{
soxr_quality_spec_t spec, * p = &spec;
unsigned q = recipe & 0xf; /* TODO: move to soxr-lsr.c: */
unsigned quality = q > SOXR_LSR2Q+2? SOXR_VHQ : q > SOXR_LSR2Q? SOXR_QQ : q;
double rej;
memset(p, 0, sizeof(*p));
if (quality > SOXR_PRECISIONQ) {
p->e = "invalid quality type";
return spec;
}
flags |= quality < SOXR_LSR0Q ? RESET_ON_CLEAR : 0;
p->phase_response = "\62\31\144"[(recipe & 0x30)>>4];
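  /* "\62\31\144" holds octal codes for 50, 25 and 100: the phase-response
   * percentage selected by recipe bits 4-5 */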
p->stopband_begin = 1;
p->precision =
quality == SOXR_QQ ? 0 :
quality <= SOXR_16_BITQ ? 16 :
quality <= SOXR_32_BITQ ? 4 + quality * 4 :
quality <= SOXR_LSR2Q ? 55 - quality * 4 : /* TODO: move to soxr-lsr.c */
0;
rej = p->precision * linear_to_dB(2.);
p->flags = flags;
if (quality <= SOXR_32_BITQ || quality == SOXR_PRECISIONQ) {
#define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */
p->passband_end = quality == 1? LOW_Q_BW0 : 1 - .05 / lsx_to_3dB(rej);
if (quality <= 2)
p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
}
else { /* TODO: move to soxr-lsr.c */
static float const bw[] = {.931f, .832f, .663f};
p->passband_end = bw[quality - SOXR_LSR0Q];
if (quality == SOXR_LSR2Q) {
p->flags &= ~SOXR_ROLLOFF_NONE;
p->flags |= SOXR_ROLLOFF_LSR2Q | SOXR_PROMOTE_TO_LQ;
}
}
if (recipe & SOXR_STEEP_FILTER)
p->passband_end = 1 - .01 / lsx_to_3dB(rej);
return spec;
}
char const * soxr_engine(soxr_t p)
{
return resampler_id();
}
size_t * soxr_num_clips(soxr_t p)
{
return &p->clips;
}
soxr_error_t soxr_error(soxr_t p)
{
return p->error;
}
soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads)
{
soxr_runtime_spec_t spec, * p = &spec;
memset(p, 0, sizeof(*p));
p->log2_min_dft_size = 10;
p->log2_large_dft_size = 17;
p->coef_size_kbytes = 400;
p->num_threads = num_threads;
return spec;
}
soxr_io_spec_t soxr_io_spec(
soxr_datatype_t itype,
soxr_datatype_t otype)
{
soxr_io_spec_t spec, * p = &spec;
memset(p, 0, sizeof(*p));
if ((itype | otype) >= SOXR_SPLIT * 2)
p->e = "invalid io datatype(s)";
else {
p->itype = itype;
p->otype = otype;
p->scale = 1;
}
return spec;
}
#if (WITH_CR32S && WITH_CR32) || (WITH_CR64S && WITH_CR64)
#if defined __GNUC__ && defined __x86_64__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"cpuid \n\t" \
: "=a" (eax_), "=b" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined __GNUC__ && defined __i386__
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm__ __volatile__ ( \
"mov %%ebx, %%edi \n\t" \
"cpuid \n\t" \
"xchg %%edi, %%ebx \n\t" \
: "=a" (eax_), "=D" (ebx_), "=c" (ecx_), "=d" (edx_) \
: "a" (type), "c" (0));
#elif defined _M_X64 && defined _MSC_VER && _MSC_VER > 1500
void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type, 0); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_X64 && defined _MSC_VER
void __cpuidex(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuidex)
#define CPUID(type, eax_, ebx_, ecx_, edx_) do { \
int regs[4]; \
__cpuidex(regs, type); \
eax_ = regs[0], ebx_ = regs[1], ecx_ = regs[2], edx_ = regs[3]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define CPUID(type, eax_, ebx_, ecx_, edx_) \
__asm pushad \
__asm mov eax, type \
__asm xor ecx, ecx \
__asm cpuid \
__asm mov eax_, eax \
__asm mov ebx_, ebx \
__asm mov ecx_, ecx \
__asm mov edx_, edx \
__asm popad
#endif
#endif
#if WITH_CR32S && WITH_CR32
static bool cpu_has_simd32(void)
{
#if defined __x86_64__ || defined _M_X64
return true;
#elif defined __i386__ || defined _M_IX86
enum {SSE = 1 << 25, SSE2 = 1 << 26};
unsigned eax_, ebx_, ecx_, edx_;
CPUID(1, eax_, ebx_, ecx_, edx_);
return (edx_ & (SSE|SSE2)) != 0;
#elif defined AV_CPU_FLAG_NEON
return !!(av_get_cpu_flags() & AV_CPU_FLAG_NEON);
#else
return false;
#endif
}
static bool should_use_simd32(void)
{
char const * e;
return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
((e = getenv("SOXR_USE_SIMD32")))? !!atoi(e) : cpu_has_simd32();
}
#else
#define should_use_simd32() true
#endif
#if WITH_CR64S && WITH_CR64
#if defined __GNUC__
#define XGETBV(type, eax_, edx_) \
__asm__ __volatile__ ( \
".byte 0x0f, 0x01, 0xd0\n" \
: "=a"(eax_), "=d"(edx_) : "c" (type));
#elif defined _M_X64 && defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219
#include <immintrin.h>
#define XGETBV(type, eax_, edx_) do { \
union {uint64_t x; uint32_t y[2];} a = {_xgetbv(0)}; \
eax_ = a.y[0], edx_ = a.y[1]; \
} while(0)
#elif defined _M_IX86 && defined _MSC_VER
#define XGETBV(type, eax_, edx_) \
__asm pushad \
__asm mov ecx, type \
__asm _emit 0x0f \
__asm _emit 0x01 \
__asm _emit 0xd0 \
__asm mov eax_, eax \
__asm mov edx_, edx \
__asm popad
#else
#define XGETBV(type, eax_, edx_) eax_ = edx_ = 0
#endif
static bool cpu_has_simd64(void)
{
enum {OSXSAVE = 1 << 27, AVX = 1 << 28};
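  /* AVX needs both CPU support and OS-enabled XMM/YMM state saving: check
   * OSXSAVE+AVX via CPUID, then XCR0 bits 1 and 2 (hence the mask 6) via
   * XGETBV. */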
unsigned eax_, ebx_, ecx_, edx_;
CPUID(1, eax_, ebx_, ecx_, edx_);
if ((ecx_ & (OSXSAVE|AVX)) == (OSXSAVE|AVX)) {
XGETBV(0, eax_, edx_);
return (eax_ & 6) == 6;
}
return false;
}
static bool should_use_simd64(void)
{
char const * e;
return ((e = getenv("SOXR_USE_SIMD" )))? !!atoi(e) :
((e = getenv("SOXR_USE_SIMD64")))? !!atoi(e) : cpu_has_simd64();
}
#else
#define should_use_simd64() true
#endif
extern control_block_t
_soxr_rate32_cb,
_soxr_rate32s_cb,
_soxr_rate64_cb,
_soxr_rate64s_cb,
_soxr_vr32_cb;
static void runtime_num(char const * env_name,
int min, int max, unsigned * field)
{
char const * e = getenv(env_name);
if (e) {
int i = atoi(e);
if (i >= min && i <= max)
*field = (unsigned)i;
}
}
static void runtime_flag(char const * env_name,
unsigned n_bits, unsigned n_shift, unsigned long * flags)
{
char const * e = getenv(env_name);
if (e) {
int i = atoi(e);
unsigned long mask = (1UL << n_bits) - 1;
if (i >= 0 && i <= (int)mask)
*flags &= ~(mask << n_shift), *flags |= ((unsigned long)i << n_shift);
}
}
soxr_t soxr_create(
double input_rate, double output_rate,
unsigned num_channels,
soxr_error_t * error0,
soxr_io_spec_t const * io_spec,
soxr_quality_spec_t const * q_spec,
soxr_runtime_spec_t const * runtime_spec)
{
double io_ratio = output_rate!=0? input_rate!=0?
input_rate / output_rate : -1 : input_rate!=0? -1 : 0;
static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768};
soxr_t p = 0;
soxr_error_t error = 0;
#if WITH_DEV_TRACE
#define _(x) (char)(sizeof(x)>=10? 'a'+(char)(sizeof(x)-10):'0'+(char)sizeof(x))
char const * e = getenv("SOXR_TRACE");
_soxr_trace_level = e? atoi(e) : 0;
{
static char const arch[] = {_(char), _(short), _(int), _(long), _(long long)
, ' ', _(float), _(double), _(long double)
, ' ', _(int *), _(int (*)(int))
, ' ', HAVE_BIGENDIAN ? 'B' : 'L'
#if defined _OPENMP
, ' ', 'O', 'M', 'P'
#endif
, 0};
#undef _
lsx_debug("arch: %s", arch);
}
#endif
if (q_spec && q_spec->e) error = q_spec->e;
else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2)
error = "invalid io datatype(s)";
if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed";
if (p) {
control_block_t * control_block;
p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0);
if (q_spec) { /* Backwards compatibility with original API: */
if (p->q_spec.passband_end > 2)
p->q_spec.passband_end /= 100;
if (p->q_spec.stopband_begin > 2)
p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100;
}
p->io_ratio = io_ratio;
p->num_channels = num_channels;
if (io_spec)
p->io_spec = *io_spec;
else
p->io_spec.scale = 1;
p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1);
runtime_num("SOXR_MIN_DFT_SIZE", 8, 15, &p->runtime_spec.log2_min_dft_size);
runtime_num("SOXR_LARGE_DFT_SIZE", 8, 20, &p->runtime_spec.log2_large_dft_size);
runtime_num("SOXR_COEFS_SIZE", 100, 800, &p->runtime_spec.coef_size_kbytes);
runtime_num("SOXR_NUM_THREADS", 0, 64, &p->runtime_spec.num_threads);
runtime_flag("SOXR_COEF_INTERP", 2, 0, &p->runtime_spec.flags);
runtime_flag("SOXR_STRICT_BUF", 1, 2, &p->runtime_spec.flags);
runtime_flag("SOXR_NOSMALLINTOPT", 1, 3, &p->runtime_spec.flags);
p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] /
datatype_full_scale[p->io_spec.itype & 3];
p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p;
#if WITH_CR32 || WITH_CR32S || WITH_VR32
if (0
#if WITH_VR32
|| ((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))
#endif
#if WITH_CR32 || WITH_CR32S
|| !(WITH_CR64 || WITH_CR64S) || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION))
#endif
) {
p->deinterleave = (deinterleave_t)_soxr_deinterleave_f;
p->interleave = (interleave_t)_soxr_interleave_f;
control_block =
#if WITH_VR32
((!WITH_CR32 && !WITH_CR32S) || (p->q_spec.flags & SOXR_VR))? &_soxr_vr32_cb :
#endif
#if WITH_CR32S
!WITH_CR32 || should_use_simd32()? &_soxr_rate32s_cb :
#endif
&_soxr_rate32_cb;
}
#if WITH_CR64 || WITH_CR64S
else
#endif
#endif
#if WITH_CR64 || WITH_CR64S
{
p->deinterleave = (deinterleave_t)_soxr_deinterleave;
p->interleave = (interleave_t)_soxr_interleave;
control_block =
#if WITH_CR64S
!WITH_CR64 || should_use_simd64()? &_soxr_rate64s_cb :
#endif
&_soxr_rate64_cb;
}
#endif
memcpy(&p->control_block, control_block, sizeof(p->control_block));
if (p->num_channels && io_ratio!=0)
error = soxr_set_io_ratio(p, io_ratio, 0);
}
if (error)
soxr_delete(p), p = 0;
if (error0)
*error0 = error;
return p;
}
soxr_error_t soxr_set_input_fn(soxr_t p,
soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen)
{
p->input_fn_state = input_fn_state;
p->input_fn = input_fn;
p->max_ilen = max_ilen? max_ilen : (size_t)-1;
return 0;
}
static void soxr_delete0(soxr_t p)
{
unsigned i;
if (p->resamplers) for (i = 0; i < p->num_channels; ++i) {
if (p->resamplers[i])
resampler_close(p->resamplers[i]);
free(p->resamplers[i]);
}
free(p->resamplers);
free(p->channel_ptrs);
free(p->shared);
memset(p, 0, sizeof(*p));
}
double soxr_delay(soxr_t p)
{
return
(p && !p->error && p->resamplers)? resampler_delay(p->resamplers[0]) : 0;
}
static soxr_error_t fatal_error(soxr_t p, soxr_error_t error)
{
soxr_delete0(p);
return p->error = error;
}
static soxr_error_t initialise(soxr_t p)
{
unsigned i;
size_t shared_size, channel_size;
resampler_sizes(&shared_size, &channel_size);
p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels);
p->shared = calloc(shared_size, 1);
p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels);
if (!p->shared || !p->channel_ptrs || !p->resamplers)
return fatal_error(p, "malloc failed");
for (i = 0; i < p->num_channels; ++i) {
soxr_error_t error;
if (!(p->resamplers[i] = calloc(channel_size, 1)))
return fatal_error(p, "malloc failed");
error = resampler_create(
p->resamplers[i],
p->shared,
p->io_ratio,
&p->q_spec,
&p->runtime_spec,
p->io_spec.scale);
if (error)
return fatal_error(p, error);
}
return 0;
}
soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels)
{
if (!p) return "invalid soxr_t pointer";
if (num_channels == p->num_channels) return p->error;
if (!num_channels) return "invalid # of channels";
if (p->resamplers) return "# of channels can't be changed";
p->num_channels = num_channels;
return soxr_set_io_ratio(p, p->io_ratio, 0);
}
soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len)
{
unsigned i;
soxr_error_t error;
if (!p) return "invalid soxr_t pointer";
if ((error = p->error)) return error;
if (!p->num_channels) return "must set # channels before O/I ratio";
if (io_ratio <= 0) return "I/O ratio out-of-range";
if (!p->channel_ptrs) {
p->io_ratio = io_ratio;
return initialise(p);
}
if (p->control_block[8]) {
for (i = 0; !error && i < p->num_channels; ++i)
resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len);
return error;
}
return fabs(p->io_ratio - io_ratio) < 1e-15? 0 :
"varying O/I ratio is not supported with this quality level";
}
void soxr_delete(soxr_t p)
{
if (p)
soxr_delete0(p), free(p);
}
soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */
{
if (p) {
struct soxr tmp = *p;
soxr_delete0(p);
memset(p, 0, sizeof(*p));
p->input_fn = tmp.input_fn;
p->runtime_spec = tmp.runtime_spec;
p->q_spec = tmp.q_spec;
p->io_spec = tmp.io_spec;
p->num_channels = tmp.num_channels;
p->input_fn_state = tmp.input_fn_state;
memcpy(p->control_block, tmp.control_block, sizeof(p->control_block));
p->deinterleave = tmp.deinterleave;
p->interleave = tmp.interleave;
return (p->q_spec.flags & RESET_ON_CLEAR)?
soxr_set_io_ratio(p, tmp.io_ratio, 0) : 0;
}
return "invalid soxr_t pointer";
}
static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len)
{
sample_t * dest = resampler_input(p->resamplers[i], NULL, len);
(*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1);
}
static size_t soxr_input(soxr_t p, void const * in, size_t len)
{
bool separated = !!(p->io_spec.itype & SOXR_SPLIT);
unsigned i;
if (!p || p->error) return 0;
if (!in && len) {p->error = "null input buffer pointer"; return 0;}
if (!len) {
p->flushing = true;
return 0;
}
if (separated)
for (i = 0; i < p->num_channels; ++i)
soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len);
else {
for (i = 0; i < p->num_channels; ++i)
p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len);
(*p->deinterleave)(
(sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels);
}
return len;
}
static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated)
{
sample_t const * src;
if (p->flushing)
resampler_flush(p->resamplers[i]);
resampler_process(p->resamplers[i], len);
src = resampler_output(p->resamplers[i], NULL, &len);
if (separated)
p->clips += (p->interleave)(p->io_spec.otype, &dest, &src,
len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
else p->channel_ptrs[i] = (void /* const */ *)src;
return len;
}
static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len)
{
unsigned u;
size_t done = 0;
bool separated = !!(p->io_spec.otype & SOXR_SPLIT);
#if defined _OPENMP
int i;
if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
for (i = 0; i < (int)p->num_channels; ++i) {
size_t done1;
done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated);
if (!i)
done = done1;
} else
#endif
for (u = 0; u < p->num_channels; ++u)
done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated);
if (!separated)
p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs,
done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
return done;
}
size_t soxr_output(soxr_t p, void * out, size_t len0)
{
size_t odone, odone0 = 0, olen = len0, osize, idone;
size_t ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio));
void const * in = out; /* Set to !=0, so that caller may leave unset. */
bool was_flushing;
if (!p || p->error) return 0;
if (!out && len0) {p->error = "null output buffer pointer"; return 0;}
do {
odone = soxr_output_no_callback(p, out, olen);
odone0 += odone;
if (odone0 == len0 || !p->input_fn || p->flushing)
break;
osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels;
out = (char *)out + osize * odone;
olen -= odone;
idone = p->input_fn(p->input_fn_state, &in, ilen);
was_flushing = p->flushing;
if (!in)
p->error = "input function reported failure";
else soxr_input(p, in, idone);
} while (odone || idone || (!was_flushing && p->flushing));
return odone0;
}
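/* Usage sketch (not part of the library): pull-mode operation.  Register an
   input callback once, then just pull output; soxr_output() above invokes
   the callback whenever it runs out of input.  `my_supply`, `next_block`,
   `my_state`, `obuf` and OBUF_LEN are hypothetical. */
#if 0
static size_t my_supply(void * state, soxr_in_t * data, size_t requested)
{
  /* Sets *data; returning 0 signals end of input, *data == NULL an error. */
  return next_block(state, data, requested);
}
/* ... */
soxr_set_input_fn(s, my_supply, my_state, 0);
size_t done;
do done = soxr_output(s, obuf, OBUF_LEN); while (done);
#endif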
static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen)
{
size_t result;
#if 0
if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING)
result = rate_i_for_o(p->resamplers[0], olen);
else
#endif
result = (size_t)ceil((double)olen * p->io_ratio);
return min(result, ilen);
}
#if 0
static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen)
{
size_t result = (size_t)ceil((double)ilen / p->io_ratio);
return min(result, olen);
}
#endif
soxr_error_t soxr_process(soxr_t p,
void const * in , size_t ilen0, size_t * idone0,
void * out, size_t olen , size_t * odone0)
{
size_t ilen, idone, odone = 0;
unsigned u;
bool flush_requested = false;
if (!p) return "null pointer";
if (!in)
flush_requested = true, ilen = ilen0 = 0;
else {
if ((ptrdiff_t)ilen0 < 0)
flush_requested = true, ilen0 = ~ilen0;
if (idone0)
ilen = soxr_i_for_o(p, olen, ilen0);
else
ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/;
}
p->flushing |= ilen == ilen0 && flush_requested;
if (!out && !in)
idone = ilen;
else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */
#if defined _OPENMP
int i;
if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
for (i = 0; i < (int)p->num_channels; ++i) {
size_t done;
if (in)
soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen);
done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true);
if (!i)
odone = done;
} else
#endif
for (u = 0; u < p->num_channels; ++u) {
if (in)
soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen);
odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true);
}
idone = ilen;
}
else {
idone = ilen? soxr_input (p, in , ilen) : 0;
odone = soxr_output(p, out, olen);
}
if (idone0) *idone0 = idone;
if (odone0) *odone0 = odone;
return p->error;
}
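/* Usage sketch (not part of the library): push-mode streaming with
   soxr_process().  Feed blocks as they arrive, then pass in == NULL once to
   flush the resampler's tail.  `s`, `ibuf`, `obuf`, ILEN and OLEN are
   hypothetical. */
#if 0
size_t idone, odone;
soxr_error_t e = soxr_process(s, ibuf, ILEN, &idone, obuf, OLEN, &odone);
/* ... repeat for each input block, consuming odone output samples ... */
e = soxr_process(s, NULL, 0, NULL, obuf, OLEN, &odone); /* Flush. */
#endif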
soxr_error_t soxr_oneshot(
double irate, double orate,
unsigned num_channels,
void const * in , size_t ilen, size_t * idone,
void * out, size_t olen, size_t * odone,
soxr_io_spec_t const * io_spec,
soxr_quality_spec_t const * q_spec,
soxr_runtime_spec_t const * runtime_spec)
{
soxr_t resampler = NULL;
soxr_error_t error = q_spec? q_spec->e : 0;
if (!error) {
soxr_quality_spec_t q_spec1;
if (!q_spec)
q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1;
resampler = soxr_create(irate, orate, num_channels,
&error, io_spec, q_spec, runtime_spec);
}
if (!error) {
error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone);
soxr_delete(resampler);
}
return error;
}
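/* Usage sketch (not part of the library): one-shot conversion of a complete
   stereo buffer from 48 kHz to 44.1 kHz.  Passing NULL specs selects the
   defaults (note that soxr_oneshot() above substitutes SOXR_LQ when q_spec
   is NULL).  `in`, `out`, ILEN and OLEN are hypothetical. */
#if 0
size_t odone;
soxr_error_t e = soxr_oneshot(48000, 44100, 2,
  in, ILEN, NULL, out, OLEN, &odone, NULL, NULL, NULL);
#endif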
soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error)
{
if (!p) return "null pointer";
if (p->error && p->error != error) return p->error; /* Keep the first error. */
p->error = error;
return 0;
}
|
GB_binop__bshift_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_int32
// A.*B function (eWiseMult): GB_AemultB__bshift_int32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_int32
// C+=b function (dense accum): GB_Cdense_accumb__bshift_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int32
// C=scalar+B GB_bind1st__bshift_int32
// C=scalar+B' GB_bind1st_tran__bshift_int32
// C=A+scalar GB_bind2nd__bshift_int32
// C=A'+scalar GB_bind2nd_tran__bshift_int32
// C type: int32_t
// A type: int32_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int32 (aij, bij)
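// For reference: GB_bitshift_int32 (provided via GB.h) implements a signed
// shift in which a positive bij shifts aij left and a negative bij shifts
// aij right (arithmetically), saturating once |bij| reaches the bit width.
// An illustrative sketch, not the library's implementation (right-shifting
// a negative signed value is implementation-defined in C):
#if 0
static inline int32_t example_bitshift_int32 (int32_t x, int8_t k)
{
    if (k == 0) return (x) ;
    if (k >= 32) return (0) ;                   // all bits shifted out
    if (k <= -32) return ((x < 0) ? -1 : 0) ;   // sign fill
    return ((k > 0) ? (x << k) : (x >> (-k))) ;
}
#endif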
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_bitshift_int32 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT32 || GxB_NO_BSHIFT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bshift_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bshift_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = GB_bitshift_int32 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bshift_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = GB_bitshift_int32 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int32 (x, aij) ; \
}
GrB_Info GB_bind1st_tran__bshift_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int32 (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__bshift_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|