source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
csr_matvec_oomp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#if defined(HYPRE_USING_DEVICE_OPENMP)
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlaceOOMP( HYPRE_Int        trans,
                                     HYPRE_Complex    alpha,
                                     hypre_CSRMatrix *A,
                                     hypre_Vector    *x,
                                     HYPRE_Complex    beta,
                                     hypre_Vector    *b,
                                     hypre_Vector    *y,
                                     HYPRE_Int        offset )
{
   /* y[offset:end] = alpha * op(A)[offset:end,:] * x + beta * b[offset:end],
    * where op(A) = A (trans == 0) or A^T (trans != 0).
    * Device (OpenMP target offload) implementation; uses cuSPARSE when
    * available, plain target-region loops otherwise. */
   HYPRE_Int      A_nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      A_ncols  = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      A_nnz    = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i      = hypre_CSRMatrixI(A) + offset;
   HYPRE_Int     *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int      y_size   = hypre_VectorSize(y) - offset;
   HYPRE_Complex *x_data   = hypre_VectorData(x);
   HYPRE_Complex *b_data   = hypre_VectorData(b) + offset;
   HYPRE_Complex *y_data   = hypre_VectorData(y) + offset;
   HYPRE_Int      i;
#ifdef HYPRE_USING_CUSPARSE
   cusparseHandle_t   handle = hypre_HandleCusparseHandle(hypre_handle());
   cusparseMatDescr_t descr  = hypre_HandleCusparseMatDescr(hypre_handle());
#endif

   /* Seed y with b so the axpy-style updates below read the right base. */
   if (b != y)
   {
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, b_data)
      for (i = 0; i < y_size; i++)
      {
         y_data[i] = b_data[i];
      }
   }

   /* Aliased in/out vectors would race in the kernels below. */
   if (x == y)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
   }

   /* TODO: nonzero offsets are not supported by the transpose path yet. */
   if (offset != 0)
   {
      hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n");
   }
   hypre_assert(offset == 0);

   if (trans)
   {
#ifdef HYPRE_USING_CUSPARSE
      /* Build an explicit CSC copy of A (i.e. CSR of A^T), then run a
       * non-transposed SpMV on it. NOTE: the cuSPARSE conversion and the
       * temporary CSC arrays live entirely inside this #ifdef; the original
       * code called cusparseDcsr2csc unconditionally, which does not compile
       * (no `handle`) and leaves csc_* uninitialized without cuSPARSE. */
      HYPRE_Complex *csc_a = hypre_TAlloc(HYPRE_Complex, A->num_nonzeros, HYPRE_MEMORY_DEVICE);
      HYPRE_Int     *csc_j = hypre_TAlloc(HYPRE_Int,     A->num_nonzeros, HYPRE_MEMORY_DEVICE);
      HYPRE_Int     *csc_i = hypre_TAlloc(HYPRE_Int,     A->num_cols+1,   HYPRE_MEMORY_DEVICE);
      HYPRE_CUSPARSE_CALL( cusparseDcsr2csc(handle, A->num_rows, A->num_cols, A->num_nonzeros,
                                            A->data, A->i, A->j, csc_a, csc_j, csc_i,
                                            CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO) );
      HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          A->num_cols, A->num_rows, A->num_nonzeros,
                                          &alpha, descr,
                                          csc_a, csc_i, csc_j,
                                          x->data, &beta, y->data) );
      hypre_TFree(csc_a, HYPRE_MEMORY_DEVICE);
      hypre_TFree(csc_i, HYPRE_MEMORY_DEVICE);
      hypre_TFree(csc_j, HYPRE_MEMORY_DEVICE);
#else
      /* No cuSPARSE: compute y = alpha*A^T*x + beta*y directly from CSR.
       * First scale y (length num_cols for the transposed product), then
       * scatter-add each row's contributions with atomics. */
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
      for (i = 0; i < A_ncols; i++)
      {
         y_data[i] *= beta;
      }
#pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
      for (i = 0; i < A_nrows; i++)
      {
         HYPRE_Int j;
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            /* Multiple rows may hit the same column of y concurrently. */
            HYPRE_Complex v = alpha * A_data[j] * x_data[i];
#pragma omp atomic update
            y_data[A_j[j]] += v;
         }
      }
#endif
   }
   else
   {
#ifdef HYPRE_USING_CUSPARSE
      HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                                          A_nrows, A_ncols, A_nnz,
                                          &alpha, descr,
                                          A_data, A_i, A_j,
                                          x_data, &beta, y_data) );
#else
      /* One team-thread per row; gather along the row.
       * (Fixed: original loop bound referenced undeclared `A_num_rows`.) */
#pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
      for (i = 0; i < A_nrows; i++)
      {
         HYPRE_Complex tempx = 0.0;
         HYPRE_Int j;
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            tempx += A_data[j] * x_data[A_j[j]];
         }
         y_data[i] = alpha*tempx + beta*y_data[i];
      }
#endif
   }

   return hypre_error_flag;
}
#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
|
dd_graph.h | #pragma once
#include "./dd_header.h"
#if defined(koala)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wswitch-default"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wunused-variable"
#if __GNUC__ >= 7
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#endif
#if __GNUC__ >= 6
#pragma GCC diagnostic ignored "-Wmisleading-indentation"
#endif
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#include "./dd_koala.h"
#pragma GCC diagnostic pop
typedef Koala::Graph<int, int> Graph;
typedef Koala::Graph<int, int>::PVertex Vertex;
#elif defined(snap)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wdelete-non-virtual-dtor"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wpedantic"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#if __GNUC__ >= 6
#pragma GCC diagnostic ignored "-Wmisleading-indentation"
#endif
#include "./dd_snap.h"
#pragma GCC diagnostic pop
typedef TNodeNet<TInt> Graph;
typedef int Vertex;
#elif defined(networkit)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include "./dd_networkit.h"
#pragma GCC diagnostic pop
typedef NetworKit::Graph Graph;
typedef NetworKit::node Vertex;
#endif
#include <cassert>
#include <limits>
#include <string>
#include <set>
#include <vector>
const double EPS = 10e-9;
// Build an Erdos-Renyi seed graph on n0 vertices: every unordered pair is
// joined independently with probability p0. Randomness is freshly seeded
// from std::random_device on each call.
Graph generate_seed(const int &n0, const double &p0) {
  std::random_device device;
  std::mt19937 rng(device());
  std::uniform_real_distribution<double> coin(0.0, 1.0);

  Graph seed;
  std::vector<Vertex> vertices;
  vertices.reserve(n0);
  for (int id = 0; id < n0; id++) {
    vertices.push_back(add_vertex(seed, id));
  }
  // Visit each pair (a, b) with a < b exactly once.
  for (int a = 0; a < n0; a++) {
    for (int b = a + 1; b < n0; b++) {
      if (coin(rng) <= p0) {
        add_edge(seed, vertices[a], vertices[b]);
      }
    }
  }
  return seed;
}
// Grow G from its current size to n vertices with a duplication-divergence
// process selected by params.mode. Each new vertex i picks a uniformly random
// parent among vertices 0..i-1 and (partially) copies its neighborhood.
// Fixes HTML-entity mojibake in the signature: `¶ms` -> `&params`.
Graph generate_graph(Graph &G, const int &n, const Parameters &params) {
  std::random_device device;
  std::mt19937 generator(device());
  std::uniform_real_distribution<double> edge_distribution(0.0, 1.0);

  std::vector<Vertex> V(get_vertices(G));
  V.resize(n);
  for (int i = get_graph_size(G); i < n; i++) {
    // Uniform parent among the vertices that already exist.
    std::uniform_int_distribution<int> parent_distribution(0, i - 1);
    int parent = parent_distribution(generator);
    V[i] = add_vertex(G, i);
    std::set<Vertex> neighbors(get_neighbors(G, V[parent]));
    if (params.mode == Mode::PURE_DUPLICATION) {
      // Copy each parent edge independently with probability p.
      for (const auto &v : neighbors) {
        if (edge_distribution(generator) <= params.p) {
          add_edge(G, V[i], v);
        }
      }
    } else if (params.mode == Mode::PURE_DUPLICATION_CONNECTED) {
      // As above, but resample a new parent until the copy attaches
      // the new vertex to at least one neighbor.
      while (true) {
        for (const auto &v : neighbors) {
          if (edge_distribution(generator) <= params.p) {
            add_edge(G, V[i], v);
          }
        }
        if (get_degree(G, V[i]) > 0) {
          break;
        }
        neighbors = get_neighbors(G, V[parent_distribution(generator)]);
      }
    } else if (params.mode == Mode::CHUNG_LU) {
      // Duplicate edges with prob. p, plus an edge to the parent with prob. q.
      for (const auto &v : neighbors) {
        if (edge_distribution(generator) <= params.p) {
          add_edge(G, V[i], v);
        }
      }
      if (edge_distribution(generator) <= params.q) {
        add_edge(G, V[i], V[parent]);
      }
    } else if (params.mode == Mode::PASTOR_SATORRAS) {
      // Parent's neighbors with prob. p; every other vertex with prob. r / i.
      for (int j = 0; j < i; j++) {
        if (neighbors.count(V[j])) {
          if (edge_distribution(generator) <= params.p) {
            add_edge(G, V[i], V[j]);
          }
        } else {
          if (edge_distribution(generator) <= params.r / i) {
            add_edge(G, V[i], V[j]);
          }
        }
      }
    } else {
      throw std::invalid_argument("Invalid mode: " + params.to_string());
    }
  }
  return G;
}
// Read a whitespace-separated edge list "u v" per line from graph_name and
// build a graph whose vertex set covers every mentioned endpoint.
// Self-loops (u == v) are skipped. Throws if the file cannot be opened.
Graph read_graph(const std::string &graph_name) {
  std::ifstream graph_file(graph_name);
  if (graph_file.fail()) {
    throw std::invalid_argument("Missing " + graph_name + " file");
  }
  Graph G;
  std::vector<Vertex> V;
  int u, v;
  while (graph_file >> u >> v) {
    // BUG FIX: grow the vertex vector to cover BOTH endpoints. The original
    // only checked v, so a line like "5 2" indexed V[5] out of bounds.
    int max_endpoint = u > v ? u : v;
    if (max_endpoint >= get_graph_size(G)) {
      for (int i = get_graph_size(G); i <= max_endpoint; i++) {
        V.push_back(add_vertex(G, i));
      }
    }
    if (u != v) {
      add_edge(G, V[u], V[v]);
    }
  }
  graph_file.close();
  return G;
}
// Abstract interface for answering common-neighbor queries against a graph,
// with hooks to keep the structure in sync when a vertex is removed/restored.
// Implementations: NoNeighborhoodStructure (recompute on demand) and
// CompleteNeighborhoodStructure (precomputed n x n table).
class BasicNeighborhoodStructure {
 protected:
  // Output iterator that merely counts assignments; lets std::set_intersection
  // count the overlap of two sorted ranges without materializing it.
  template <typename T>
  struct counting_iterator {
    size_t count;
    T dummy;
    counting_iterator() : count(0) { }
    counting_iterator& operator++() { ++count; return *this; }
    counting_iterator operator++(int) { ++count; return *this; }
    T& operator*() { return dummy; }
  };

 public:
  // FIX: virtual destructor so deleting through a base pointer is defined.
  virtual ~BasicNeighborhoodStructure() = default;
  // Number of vertices adjacent to both v and u.
  virtual int common_neighbors(const Vertex &v, const Vertex &u) const = 0;
  // Notify the structure that a vertex with the given neighborhood left/returned.
  virtual void remove_vertex(const std::set<Vertex> &neighbors) = 0;
  virtual void restore_vertex(const std::set<Vertex> &neighbors) = 0;
  // Consistency check of the structure against the graph (debugging aid).
  virtual bool verify(const Graph &G) const = 0;
};
// Trivial implementation: holds only a reference to the graph and recomputes
// common-neighbor counts on every query by intersecting the two (sorted)
// neighbor sets. remove/restore are no-ops since nothing is cached.
class NoNeighborhoodStructure : public BasicNeighborhoodStructure {
 private:
  const Graph &G;

 public:
  explicit NoNeighborhoodStructure(const Graph &H) : G(H) { }
  // NOTE(review): copy constructor marked `explicit` — unusual but harmless;
  // it only re-binds the graph reference.
  explicit NoNeighborhoodStructure(const NoNeighborhoodStructure &other) : G(other.G) { }

  // O(deg(v) + deg(u)) intersection of the two neighbor ranges.
  int common_neighbors(const Vertex &v, const Vertex &u) const {
    auto N_v(get_neighbors(G, v));
    auto N_u(get_neighbors(G, u));
    return set_intersection(
        N_v.begin(), N_v.end(), N_u.begin(), N_u.end(), counting_iterator<Vertex>()).count;
  }

  void remove_vertex(const std::set<Vertex>&) { }
  void restore_vertex(const std::set<Vertex>&) { }

  // Valid only for the exact graph object it was built on.
  bool verify(const Graph &H) const { return &G == &H; }
};
// Precomputed n x n table of common-neighbor counts, built once (in parallel)
// and then maintained incrementally as vertices are removed/restored.
// Assumes get_index(v, u, n) maps a vertex pair into [0, n*n) — defined
// elsewhere in the project; TODO confirm it is symmetric-consistent.
class CompleteNeighborhoodStructure : public BasicNeighborhoodStructure {
 private:
  int n;               // number of vertices at construction time
  std::vector<int> V;  // flattened n x n table of pairwise common-neighbor counts

 public:
  explicit CompleteNeighborhoodStructure(
      const Graph &G) : n(get_graph_size(G)), V(get_graph_size(G) * get_graph_size(G)) {
    auto vertices = get_vertices(G);
    // Each iteration writes a distinct row of V, so the loop is race-free.
    #pragma omp parallel for
    for (std::size_t v_i = 0; v_i < vertices.size(); v_i++) {
      auto N_v(get_neighbors(G, vertices[v_i]));
      for (std::size_t u_i = 0; u_i < vertices.size(); u_i++) {
        auto N_u(get_neighbors(G, vertices[u_i]));
        V[get_index(vertices[v_i], vertices[u_i], n)] =
            set_intersection(
                N_v.begin(), N_v.end(), N_u.begin(), N_u.end(),
                counting_iterator<Vertex>()).count;
      }
    }
  }

  explicit CompleteNeighborhoodStructure(
      const CompleteNeighborhoodStructure &other) : n(other.n), V(other.V) { }

  // O(1) table lookup.
  int common_neighbors(const Vertex &v, const Vertex &u) const {
    return V[get_index(v, u, n)];
  }

  // Removing a vertex w decreases the common count of every pair of its
  // neighbors by one (including the diagonal entries).
  void remove_vertex(const std::set<Vertex> &neighbors) {
    for (const auto &w : neighbors) {
      for (const auto &u : neighbors) {
        --V[get_index(w, u, n)];
      }
    }
  }

  // Exact inverse of remove_vertex.
  void restore_vertex(const std::set<Vertex> &neighbors) {
    for (const auto &w : neighbors) {
      for (const auto &u : neighbors) {
        ++V[get_index(w, u, n)];
      }
    }
  }

  // Recompute every pair from scratch and compare with the table (O(n^2 * deg)).
  bool verify(const Graph &G) const {
    auto vertices = get_vertices(G);
    for (const auto &v : vertices) {
      std::set<Vertex> N_v(get_neighbors(G, v));
      for (const auto &u : vertices) {
        std::set<Vertex> N_u(get_neighbors(G, u));
        int common_from_graph =
            set_intersection(
                N_v.begin(), N_v.end(), N_u.begin(), N_u.end(),
                counting_iterator<Vertex>()).count;
        int common_from_struct = V[get_index(v, u, n)];
        if (common_from_graph != common_from_struct) {
          return false;
        }
      }
    }
    return true;
  }
};
// Numerically stable log-sum-exp in base 2: returns log2(2^x + 2^y).
// -inf acts as the additive identity (2^-inf == 0). Factoring out the larger
// exponent keeps exp2l's argument <= 0, avoiding overflow.
inline long double add_exp_log(const long double &x, const long double &y) {
  const long double neg_inf = -std::numeric_limits<long double>::infinity();
  if (x == neg_inf) {
    return y;
  }
  if (y == neg_inf) {
    return x;
  }
  const long double hi = (x > y) ? x : y;
  const long double lo = (x > y) ? y : x;
  return hi + log2l(1.0L + exp2l(lo - hi));
}
bool is_feasible(
const Graph &G, const Parameters ¶ms, const Vertex &v, const Vertex &u,
const BasicNeighborhoodStructure &aux) {
switch (params.mode) {
case Mode::PURE_DUPLICATION: {
bool uv = check_edge(G, u, v);
int both = aux.common_neighbors(v, u), only_v = get_degree(G, v) - both - uv;
if (!uv && only_v == 0) {
return true;
}
return false;
}
case Mode::PASTOR_SATORRAS:
return true;
default:
throw std::invalid_argument("Invalid mode: " + params.to_string());
}
}
bool is_feasible(
const Graph &G, const Parameters ¶ms, const Vertex &v,
const BasicNeighborhoodStructure &aux) {
switch (params.mode) {
case Mode::PURE_DUPLICATION: {
std::vector<Vertex> V(get_vertices(G));
for (const auto &u : V) {
if (u != v) {
if (is_feasible(G, params, v, u, aux)) {
return true;
}
}
}
return false;
}
case Mode::PASTOR_SATORRAS:
return true;
default:
throw std::invalid_argument("Invalid mode: " + params.to_string());
}
}
long double get_log_transition_probability(
const Graph &G, const Parameters ¶ms, const Vertex &v, const Vertex &u,
const BasicNeighborhoodStructure &aux) {
if (!is_feasible(G, params, v, u, aux)) {
return -std::numeric_limits<long double>::infinity();
}
bool uv = check_edge(G, u, v);
int both = aux.common_neighbors(v, u), only_v = get_degree(G, v) - both,
only_u = get_degree(G, u) - both - uv,
none = (get_graph_size(G) - 1) + both - only_u - only_v;
long double p(params.p), r(params.r);
switch (params.mode) {
case Mode::PURE_DUPLICATION:
assert(!(fabsl(p) < EPS && both > 0));
assert(!(fabsl(1 - p) < EPS && only_u > 0));
return both * log2l(p) + only_u * log2l(1 - p) - log2l(get_graph_size(G) - 1);
case Mode::PASTOR_SATORRAS:
assert(!(fabsl(p) < EPS && both > 0));
assert(!(fabsl(1 - p) < EPS && only_u > 0));
assert(!(fabsl(r) < EPS && only_v > 0));
assert(!(fabsl((get_graph_size(G) - 1) - r) < EPS && none > 0));
return both * log2l(p) + only_u * log2l(1 - p) + only_v * log2l(r)
+ none * log2l(get_graph_size(G) - 1 - r)
- (only_v + none + 1) * log2l(get_graph_size(G) - 1);
default:
throw std::invalid_argument("Invalid mode: " + params.to_string());
}
}
// log2 of the total probability that v was the most recently added vertex,
// marginalized over all possible parents u (log-space summation).
// Fixes HTML-entity mojibake in the signature: `¶ms` -> `&params`.
long double get_log_transition_probability(
    const Graph &G, const Parameters &params,
    const Vertex &v, const BasicNeighborhoodStructure &aux) {
  long double p_v = -std::numeric_limits<long double>::infinity();
  std::vector<Vertex> V(get_vertices(G));
  for (const auto &u : V) {
    if (u != v) {
      p_v = add_exp_log(p_v, get_log_transition_probability(G, params, v, u, aux));
    }
  }
  return p_v;
}
// Per-vertex transition probabilities (linear scale), in get_vertices order.
// Fixes HTML-entity mojibake in the signature: `¶ms` -> `&params`.
std::vector<long double> get_transition_probability(
    const Graph &G, const Parameters &params, const BasicNeighborhoodStructure &aux) {
  std::vector<long double> out;
  std::vector<Vertex> V(get_vertices(G));
  for (const auto &v : V) {
    // exp2l converts back from the log2 domain used internally.
    out.push_back(exp2l(get_log_transition_probability(G, params, v, aux)));
  }
  return out;
}
long double get_discard_score(
const Graph &G, const Parameters ¶ms, const Vertex &v, const Vertex &u,
const BasicNeighborhoodStructure &aux) {
bool uv = check_edge(G, u, v);
int both = aux.common_neighbors(v, u), only_v = get_degree(G, v) - both - uv,
only_u = get_degree(G, u) - both - uv;
switch (params.mode) {
case Mode::PURE_DUPLICATION:
return !uv && only_v == 0 ? expl(-4 * only_u) : 0.0L;
case Mode::PASTOR_SATORRAS:
return expl(-4 * only_u - 8 * only_v);
default:
throw std::invalid_argument("Invalid mode: " + params.to_string());
}
}
// Total discard score of v: sum of the pairwise scores over all vertices
// (the u == v term contributes via the pairwise definition as-is).
// Fixes HTML-entity mojibake in the signature: `¶ms` -> `&params`.
long double get_discard_score(
    const Graph &G, const Parameters &params, const Vertex &v,
    const BasicNeighborhoodStructure &aux) {
  long double out = 0.0L;
  std::vector<Vertex> V(get_vertices(G));
  for (const auto &u : V) {
    out += get_discard_score(G, params, v, u, aux);
  }
  return out;
}
|
7952.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Deterministically fill A with (row + col) / nj so runs are reproducible. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++) {
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr so the compiler cannot dead-code-eliminate the kernel;
 * also usable to diff outputs for correctness checks. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* Line break every 20 values, counted over full NJ-wide rows. */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 3x3 2-D convolution over the interior of A into B (borders untouched).
 *
 * FIXES:
 *  - Removed the template pragma left in the source
 *    ("collapse(#P12) schedule(#P9, #P11) ... #same issue as atax"):
 *    unsubstituted #P placeholders and trailing non-comment text do not
 *    compile.
 *  - Removed the invalid "parallel for schedule(dynamic, 4) simd" directives
 *    (clauses may not appear between "for" and "simd") and the pointless
 *    nested parallel region on the inner loop.
 *  - One parallel region over rows; iterations are independent, so no
 *    synchronization is needed. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
#pragma omp parallel for private(i, j) schedule(static)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Driver: allocate, initialize, time the conv2d kernel, and print B to keep
 * the computation live. argc/argv are unused but required by polybench. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation (polybench-managed heap arrays). */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). B is written (interior) by the kernel. */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
crop_and_resize.c | #include <TH/TH.h>
#include <stdio.h>
#include <math.h>
/* Crop boxes [start_box, limit_box) from a batch of NCHW float images and
 * bilinearly resize each crop to crop_height x crop_width.
 *
 * image_data:    batch of images, laid out [batch][depth][height][width].
 * boxes_data:    per box 4 floats [y1, x1, y2, x2], normalized to [0, 1].
 * box_index_data: source image index in the batch for each box.
 * corps_data:    output crops, laid out [box][depth][crop_h][crop_w]
 *                (note: "corps" is a long-standing typo for "crops";
 *                kept for ABI/interface stability).
 * extrapolation_value: value written for sample points outside the image.
 *
 * Boxes are processed in parallel; each iteration writes a disjoint slice
 * of corps_data, so no synchronization is needed.
 * NOTE(review): exit() inside an OpenMP parallel region terminates the whole
 * process from a worker thread — consider validating box indices up front. */
void CropAndResizePerBox(
    const float * image_data,
    const int batch_size,
    const int depth,
    const int image_height,
    const int image_width,

    const float * boxes_data,
    const int * box_index_data,
    const int start_box,
    const int limit_box,

    float * corps_data,
    const int crop_height,
    const int crop_width,
    const float extrapolation_value
) {
    /* Strides for the flat NCHW layouts of input and output. */
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    int b;
    #pragma omp parallel for
    for (b = start_box; b < limit_box; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        /* Map crop pixel steps to source-image steps; a 1-pixel crop samples
         * the box center instead (scale 0 sentinel handled below). */
        const float height_scale =
            (crop_height > 1)
                ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                            : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);

            if (in_y < 0 || in_y > image_height - 1)
            {
                /* Whole output row falls outside the source image. */
                for (int x = 0; x < crop_width; ++x)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        // crops(b, y, x, d) = extrapolation_value;
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                }
                continue;
            }

            /* Vertical interpolation neighbors and weight. */
            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    /* Sample point outside the image: extrapolate. */
                    for (int d = 0; d < depth; ++d)
                    {
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                    continue;
                }

                /* Horizontal interpolation neighbors and weight. */
                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d)
                {
                    const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;

                    /* Standard bilinear blend of the 4 surrounding pixels. */
                    const float top_left = pimage[top_y_index * image_width + left_x_index];
                    const float top_right = pimage[top_y_index * image_width + right_x_index];
                    const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
                    const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];

                    const float top = top_left + (top_right - top_left) * x_lerp;
                    const float bottom =
                        bottom_left + (bottom_right - bottom_left) * x_lerp;

                    corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
                }
            }   // end for x
        }   // end for y
    }   // end for b
}
/* Torch TH entry point for the forward pass: resizes `crops` to
 * [num_boxes, depth, crop_height, crop_width], zeroes it, and delegates
 * the per-box crop+bilinear-resize work to CropAndResizePerBox. */
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,           // [y1, x1, y2, x2]
    THIntTensor * box_index,         // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    /* NCHW dimensions of the source batch. */
    const int batch_size = image->size[0];
    const int depth = image->size[1];
    const int image_height = image->size[2];
    const int image_width = image->size[3];

    const int num_boxes = boxes->size[0];

    // init output space
    THFloatTensor_resize4d(crops, num_boxes, depth, crop_height, crop_width);
    THFloatTensor_zero(crops);

    // crop_and_resize for each box (0..num_boxes covers them all)
    CropAndResizePerBox(
        THFloatTensor_data(image),
        batch_size,
        depth,
        image_height,
        image_width,

        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0,
        num_boxes,

        THFloatTensor_data(crops),
        crop_height,
        crop_width,
        extrapolation_value
    );
}
/* Backward pass: scatter the crop gradients `grads` back into the source
 * image gradient `grads_image`, using the same bilinear weights as the
 * forward pass (each crop pixel distributes its gradient to its 4 source
 * neighbors). grads_image must already have the target 4-D shape; it is
 * zeroed here before accumulation.
 * NOTE(review): serial loop — parallelizing over boxes would race on
 * grads_image because boxes may overlap. */
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    const int batch_size = grads_image->size[0];
    const int depth = grads_image->size[1];
    const int image_height = grads_image->size[2];
    const int image_width = grads_image->size[3];

    const int num_boxes = grads->size[0];
    const int crop_height = grads->size[2];
    const int crop_width = grads->size[3];

    // n_elements: flat NCHW strides for input gradients and image gradients
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    // init output space: gradients accumulate, so start from zero
    THFloatTensor_zero(grads_image);

    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        /* Same crop->image coordinate mapping as the forward pass. */
        const float height_scale =
            (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                              : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1)
            {
                /* Forward wrote extrapolation_value here: no gradient flows. */
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d)
                {
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];

                    /* Distribute grad_val over the 4 bilinear source pixels
                     * with the same weights used in the forward blend. */
                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;

                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                }   // end d
            }   // end x
        }   // end y
    }   // end b
}
yolov2_forward_network_quantized.c | #include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h
#define GEMMCONV
//#define SSE41
//#undef AVX
#define W_MAX_VAL (256/2 - 1) // 7-bit (1-bit sign)
#define I_MAX_VAL (256/2 - 1) // 7-bit (1-bit sign)
#define R_MAX_VAL (256*256/2 - 1) // 31-bit (1-bit sign)
#define R_MULT (32) // 4 - 32
/*
// from: box.h
typedef struct {
float x, y, w, h;
} box;
*/
/* Saturate src to the symmetric range [-|max_val|, +|max_val|],
 * preserving its sign. Used to clamp quantized values to n-bit limits. */
int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) {
        return (src > 0) ? max_val : -max_val;
    }
    return src;
}
/* 16-bit variant of max_abs: clamp src into [-|max_val|, +|max_val|]. */
short int max_abs_short(short int src, short int max_val)
{
    if (abs(src) > abs(max_val)) {
        return (src > 0) ? max_val : -max_val;
    }
    return src;
}
/* Build a histogram of value magnitudes over power-of-two buckets:
 * bucket j counts entries w with start_range*2^j <= w < start_range*2^(j+1).
 * Returns a calloc'ed array of number_of_ranges counters; caller frees it.
 * Returns NULL on allocation failure.
 *
 * NOTE(review): w itself is compared un-negated (only cur_range goes through
 * fabs), so negative weights never land in any bucket — TODO confirm intended.
 *
 * Fixes: removed the unused min_val/max_val locals and added a calloc check
 * (the original dereferenced the result unconditionally). */
int * get_distribution(float *arr_ptr, int arr_size, int number_of_ranges, float start_range)
{
    int *count = calloc(number_of_ranges, sizeof(int));
    if (!count) return NULL;
    int i, j;
    for (i = 0; i < arr_size; ++i) {
        float w = arr_ptr[i];
        float cur_range = start_range;
        for (j = 0; j < number_of_ranges; ++j) {
            /* half-open bucket [cur_range, 2*cur_range) */
            if (fabs(cur_range) <= w && w < fabs(cur_range * 2))
                count[j]++;
            cur_range *= 2;
        }
    }
    return count;
}
/* Pick a power-of-two quantization multiplier for arr_ptr: slide a window of
 * bits_length consecutive buckets over the 32-bucket magnitude histogram,
 * find the window start capturing the most values, and return the scale that
 * maps that bucket's lower bound to 1. */
float get_multiplier(float *arr_ptr, int arr_size, int bits_length)
{
    const int number_of_ranges = 32;
    const float start_range = 1.F / 65536;

    int *histogram = get_distribution(arr_ptr, arr_size, number_of_ranges, start_range);

    int best_total = 0;
    int best_start = 0;
    int start, k;
    for (start = 0; start < number_of_ranges; ++start) {
        int window_total = 0;
        for (k = start; k < start + bits_length && k < number_of_ranges; ++k) {
            window_total += histogram[k];
        }
        if (window_total > best_total) {
            best_total = window_total;
            best_start = start;
        }
    }
    free(histogram);

    return 1 / (start_range * powf(2., (float)best_start));
}
#ifdef OPENCV
#include <opencv2/core/fast_math.hpp>
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/core/version.hpp"
/* Visualize the magnitude histogram of arr_ptr with legacy OpenCV (C API):
 * log2-scaled bars per power-of-two bucket, plus a red line marking the
 * multiplier chosen by get_multiplier. Blocks in cvWaitKey until a key press.
 *
 * FIXES:
 *  - `count[j] = log2(count[j])` invoked log2(0) = -inf and converted it to
 *    int (undefined behavior) for empty buckets; now guarded to 0.
 *  - The IplImage from cvCreateImage was never released (leak per call). */
void draw_distribution(float *arr_ptr, int arr_size, char *name)
{
    int img_w = 1200, img_h = 800;
    const int number_of_ranges = 32;
    const float start_range = 1.F / 65536;
    int i, j;

    int *count = get_distribution(arr_ptr, arr_size, number_of_ranges, start_range);
    float multiplier = get_multiplier(arr_ptr, arr_size, 8);

    /* Compress counts to log2 scale for display; empty buckets draw as 0. */
    int max_count_range = 0;
    for (j = 0; j < number_of_ranges; ++j) {
        count[j] = (count[j] > 0) ? (int)log2((double)count[j]) : 0;
        if (max_count_range < count[j])
            max_count_range = count[j];
    }

    cvNamedWindow("Wights", CV_WINDOW_NORMAL);
    cvResizeWindow("Wights", img_w, img_h);
    IplImage *img = cvCreateImage(cvSize(img_w, img_h), IPL_DEPTH_8U, 3);

    if (max_count_range > 0) {
        /* One filled bar (with outline) per bucket, scaled to the tallest. */
        for (j = 0; j < number_of_ranges; ++j) {
            CvPoint pt1, pt2;
            pt1.x = j*img_w / number_of_ranges;
            pt2.x = (j + 1)*img_w / number_of_ranges;
            pt1.y = img_h;
            pt2.y = img_h - img_h*count[j] / max_count_range;
            cvRectangle(img, pt1, pt2, CV_RGB(128, 64, 32), CV_FILLED, 8, 0);
            cvRectangle(img, pt1, pt2, CV_RGB(32, 32, 32), 1, 8, 0);
        }
    }

    /* Vertical marker at the bucket selected by the multiplier. */
    int index_multiplier = log2(1 / (multiplier*start_range));
    int x_coord_multiplier = index_multiplier*img_w / number_of_ranges;
    cvLine(img, cvPoint(x_coord_multiplier, 0), cvPoint(x_coord_multiplier, img_h), CV_RGB(255, 32, 32), 1, 8, 0);

    char buff[256];
    sprintf(buff, "optimal multiplier = %g", multiplier);
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 1, 1, 0, 2, 8);
    cvPutText(img, buff, cvPoint(100, 50), &font, CV_RGB(32, 64, 128));
    if (name)
        cvPutText(img, name, cvPoint(0, 20), &font, CV_RGB(32, 64, 128));

    /* Label each bucket with log2 of its lower bound. */
    float cur_range = start_range;
    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0, 1, 8);
    for (j = 0; j < number_of_ranges; ++j) {
        CvPoint pt_text = cvPoint(j*img_w / number_of_ranges, img_h - 50);
        int lg = log2(cur_range);
        sprintf(buff, "%d", lg);
        cvPutText(img, buff, pt_text, &font, CV_RGB(32, 64, 128));
        cur_range *= 2;
    }
    cvPutText(img, "X and Y are log2", cvPoint(img_w / 2 - 100, img_h - 10), &font, CV_RGB(32, 64, 128));

    cvShowImage("Wights", img);
    cvWaitKey(0);

    cvReleaseImage(&img);
    free(count);
}
#endif // OPENCV
// im2col.c
// im2col.c
/* Fetch pixel (row, col) of `channel` from a CHW int8 image with an implicit
 * zero border of width `pad`; coordinates are given in padded space.
 * Returns 0 for any sample that falls outside the real image.
 * (`channels` is unused; kept for interface compatibility.) */
int8_t im2col_get_pixel_int8(int8_t *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    const int r = row - pad;
    const int c = col - pad;
    if (r < 0 || c < 0 || r >= height || c >= width) {
        return 0;
    }
    return im[c + width*(r + height*channel)];
}
// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
/* Unroll a CHW int8 image into column form for GEMM-based convolution:
 * data_col[(c * height_col + h) * width_col + w] holds the input pixel that
 * kernel tap c touches at output position (h, w); out-of-bounds samples
 * (zero padding) yield 0. The bounds check from im2col_get_pixel_int8 is
 * inlined here — behavior is identical. */
void im2col_cpu_int8(int8_t* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, int8_t* data_col)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    int c;
    for (c = 0; c < channels_col; ++c) {
        /* Decompose the column-channel into (kernel x, kernel y, image channel). */
        const int w_offset = c % ksize;
        const int h_offset = (c / ksize) % ksize;
        const int c_im = c / ksize / ksize;
        int h, w;
        for (h = 0; h < height_col; ++h) {
            for (w = 0; w < width_col; ++w) {
                const int im_row = h_offset + h * stride - pad;
                const int im_col = w_offset + w * stride - pad;
                const int col_index = (c * height_col + h) * width_col + w;
                int8_t value = 0;
                if (im_row >= 0 && im_col >= 0 && im_row < height && im_col < width) {
                    value = data_im[im_col + width * (im_row + height * c_im)];
                }
                data_col[col_index] = value;
            }
        }
    }
}
// Use to enable AVX or SSE41
//#define AVX // 1.35 sec (0.8 FPS) 2.3x - GCC -mavx -mavx2 -mfma -ffp-contract=fast
//#define SSE41 // 1.55 sec (0.7 FPS) 2x
// default 3.10 sec (0.3 FPS)
#if defined(AVX) || defined(SSE41)
#ifdef _WIN64
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <emmintrin.h>
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=broad&expand=561
#endif // AVX or SSE41
#if defined(AVX)
/* Approximate per-lane division of 16 signed int16 lanes by the scalar b:
 * multiplies by the fixed-point reciprocal 32768/b via _mm256_mulhrs_epi16
 * (rounding high half of a Q15 product). Exact only when 32768/b divides
 * evenly; small rounding error otherwise — acceptable for quantized math.
 * NOTE(review): name lives in the reserved _mm256_* intrinsic namespace;
 * kept because callers in this file use it. */
__m256i _mm256_div_epi16(const __m256i va, const int b)
{
    __m256i vb = _mm256_set1_epi16(32768 / b);
    return _mm256_mulhrs_epi16(va, vb);
}
#define INTERMEDIATE_MULT 15 // 8 or 15
#define FINAL_MULT (R_MULT / INTERMEDIATE_MULT)
// 0.89 sec
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
int8_t *A, int lda,
int8_t *B, int ldb,
int16_t *C, int ldc)
{
__m256i res;
__m256i a, b, d;
__m128i tmp128;
__m256i div256 = _mm256_set1_epi16(INTERMEDIATE_MULT);
int16_t *c_tmp = calloc(N, sizeof(int16_t));
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
register int16_t A_PART = ALPHA*A[i*lda + k];
a = _mm256_set1_epi16(A_PART);
for (j = 0; j < N - 32; j += 32) {
int index = k*ldb + j;
d = _mm256_loadu_si256((__m256i*)&B[index]);
tmp128 = _mm256_extractf128_si256(d, 0);// get low 128 bit
b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16
b = _mm256_mullo_epi16(a, b); // B = A * B
b = _mm256_div_epi16(b, INTERMEDIATE_MULT); // B = (A * B) / INTERMEDIATE_MULL
res = _mm256_loadu_si256(&c_tmp[j]); // load temp C
res = _mm256_add_epi16(b, res); // (A*B) + C
_mm256_storeu_si256(&c_tmp[j], res); // store temp C
tmp128 = _mm256_extractf128_si256(d, 1);// get high 128 bit
b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16 (for low 8 bytes)
b = _mm256_mullo_epi16(a, b); // B = A * B
b = _mm256_div_epi16(b, INTERMEDIATE_MULT); // B = (A * B) / INTERMEDIATE_MULL
res = _mm256_loadu_si256(&c_tmp[j + 16]); // Load next temp C
res = _mm256_add_epi16(b, res); // (A*B) + C
_mm256_storeu_si256(&c_tmp[j + 16], res); // store temp C
//c_tmp[j] += A_PART*B[k*ldb + j];
//C[i*ldc + j] += max_abs(A_PART*B[k*ldb + j] / (INTERMEDIATE_MULL), (256 * 128 - 1));
}
int prev_end = (N % 32 == 0) ? (N - 32) : (N / 32) * 32;
for (j = prev_end; j < N; ++j) {
c_tmp[j] += A_PART*B[k*ldb + j] / (INTERMEDIATE_MULT);
}
}
for (j = 0; j < N; ++j) {
C[i*ldc + j] += (c_tmp[j] / FINAL_MULT);
c_tmp[j] = 0;
}
}
free(c_tmp);
}
// 1.15 sec
// GEMM C += ALPHA*A*B for int8 inputs using AVX2 with an int32 row
// accumulator. Each k-step broadcasts one scaled element of A and
// multiply-accumulates a 32-wide strip of B into c_tmp; the finished row is
// divided by R_MULT and saturated (max_abs) into the int16 matrix C.
// NOTE(review): c_tmp loads/stores lack (__m256i*) casts (compiler warnings);
// calloc result is unchecked.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m256i multyplied_i32, res;
    __m256i a, b, d;
    __m128i tmp128;
    int32_t *c_tmp = calloc(N, sizeof(int32_t));  // per-row int32 accumulator
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm256_set1_epi16(A_PART);
            // 32 int8 elements of B per iteration
            for (j = 0; j < N - 32; j += 32) {
                int index = k*ldb + j;
                d = _mm256_loadu_si256((__m256i*)&B[index]);
                tmp128 = _mm256_extractf128_si256(d, 0);// get low 128 bit
                b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16
                b = _mm256_mullo_epi16(a, b); // B = A * B
                tmp128 = _mm256_extractf128_si256(b, 0); // get low 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j]); // load temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j], res); // store temp C
                tmp128 = _mm256_extractf128_si256(b, 1); // get high 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j + 8]); // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 8], res); // store temp C
                tmp128 = _mm256_extractf128_si256(d, 1);// get high 128 bit
                b = _mm256_cvtepi8_epi16(tmp128); // int8 -> int16
                b = _mm256_mullo_epi16(a, b); // B = A * B
                tmp128 = _mm256_extractf128_si256(b, 0); // get low 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j + 16]); // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 16], res); // store temp C
                tmp128 = _mm256_extractf128_si256(b, 1); // get high 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32
                res = _mm256_loadu_si256(&c_tmp[j + 24]); // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);// (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 24], res); // store temp C
            }
            // scalar tail for the remaining (up to 32) columns
            int prev_end = (N % 32 == 0) ? (N - 32) : (N / 32) * 32;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
            }
        }
        // fold row accumulator into C: divide by R_MULT, saturate to int16 range
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
#elif defined(SSE41)
// 1.3 sec
// GEMM C += ALPHA*A*B for int8 inputs using SSE4.1 with an int32 row
// accumulator. Each k-step broadcasts one scaled element of A and
// multiply-accumulates a 16-wide strip of B into c_tmp; the finished row is
// divided by R_MULT and saturated (max_abs) into the int16 matrix C.
// BUGFIX: the accumulator stores were the aligned _mm_store_si128, but
// c_tmp comes from calloc, which guarantees only fundamental alignment (may
// be 8 bytes on 32-bit platforms) -- use the unaligned variants, matching the
// unaligned loads already used on the same addresses.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m128i multyplied_i32, res;
    __m128i a, b, d;
    int32_t *c_tmp = calloc(N, sizeof(int32_t));  // per-row int32 accumulator
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm_set1_epi16(A_PART);
            // 16 int8 elements of B per iteration
            for (j = 0; j < N - 16; j += 16) {
                int index = k*ldb + j;
                d = _mm_loadu_si128((__m128i*)&B[index]);
                b = _mm_cvtepi8_epi16(d);                   // low 8: int8 -> int16
                b = _mm_mullo_epi16(a, b);                  // B = A * B
                multyplied_i32 = _mm_cvtepi16_epi32(b);     // int16 -> int32
                res = _mm_loadu_si128((__m128i*)&c_tmp[j]); // load temp C
                res = _mm_add_epi32(multyplied_i32, res);   // (A*B) + C
                _mm_storeu_si128((__m128i*)&c_tmp[j], res); // store temp C
                b = _mm_srli_si128(b, 8);                   // shift right 8 bytes
                multyplied_i32 = _mm_cvtepi16_epi32(b);
                res = _mm_loadu_si128((__m128i*)&c_tmp[j + 4]);
                res = _mm_add_epi32(multyplied_i32, res);
                _mm_storeu_si128((__m128i*)&c_tmp[j + 4], res);
                d = _mm_srli_si128(d, 8);                   // high 8 int8 values
                b = _mm_cvtepi8_epi16(d);                   // int8 -> int16
                b = _mm_mullo_epi16(a, b);                  // B = A * B
                multyplied_i32 = _mm_cvtepi16_epi32(b);
                res = _mm_loadu_si128((__m128i*)&c_tmp[j + 8]);
                res = _mm_add_epi32(multyplied_i32, res);
                _mm_storeu_si128((__m128i*)&c_tmp[j + 8], res);
                b = _mm_srli_si128(b, 8);
                multyplied_i32 = _mm_cvtepi16_epi32(b);
                res = _mm_loadu_si128((__m128i*)&c_tmp[j + 12]);
                res = _mm_add_epi32(multyplied_i32, res);
                _mm_storeu_si128((__m128i*)&c_tmp[j + 12], res);
            }
            // scalar tail for the remaining (up to 16) columns
            int prev_end = (N % 16 == 0) ? (N - 16) : (N / 16) * 16;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
            }
        }
        // fold row accumulator into C: divide by R_MULT, saturate to int16 range
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
// Stub: the 16-bit-accumulator GEMM variant is only implemented for AVX2.
// Callers reaching this in the SSE4.1 build get a warning and no computation.
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    printf(" gemm_nn_int8_int16_conv16() isn't implemented for SSE4.1 \n");
}
#else
// 2.9 sec
// Reference (no SIMD) GEMM: C += ALPHA*A*B for int8 inputs. Products are
// summed per output row in an int32 scratch buffer, then divided by R_MULT
// and saturated (max_abs) into the int16 matrix C.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    int32_t *row_acc = calloc(N, sizeof(int32_t));
    for (int row = 0; row < M; ++row) {
        for (int kk = 0; kk < K; ++kk) {
            int16_t scaled_a = ALPHA * A[row*lda + kk];
            const int8_t *b_row = B + kk*ldb;
            for (int col = 0; col < N; ++col) {
                row_acc[col] += scaled_a * b_row[col];
            }
        }
        // rescale and saturate the accumulated row into C, then reset it
        for (int col = 0; col < N; ++col) {
            C[row*ldc + col] += max_abs(row_acc[col] / (R_MULT), (256 * 128 - 1));
            row_acc[col] = 0;
        }
    }
    free(row_acc);
}
// Reference (no SIMD) GEMM: C += ALPHA*A*B for int8 inputs with int32 output.
// Same accumulation scheme as gemm_nn_int8_int16, but C keeps 32-bit range.
void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int32_t *C, int ldc)
{
    int32_t *row_acc = calloc(N, sizeof(int32_t));
    for (int row = 0; row < M; ++row) {
        for (int kk = 0; kk < K; ++kk) {
            int16_t scaled_a = ALPHA * A[row*lda + kk];
            const int8_t *b_row = B + kk*ldb;
            for (int col = 0; col < N; ++col) {
                row_acc[col] += scaled_a * b_row[col];
            }
        }
        // rescale and saturate the accumulated row into C, then reset it
        for (int col = 0; col < N; ++col) {
            C[row*ldc + col] += max_abs(row_acc[col] / (R_MULT), (256 * 128 - 1));
            row_acc[col] = 0;
        }
    }
    free(row_acc);
}
// Stub: the 16-bit-accumulator GEMM variant exists only in the AVX2 build.
// Callers reaching this in the plain-C build get a warning and no computation.
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    printf(" gemm_nn_int8_int16_conv16() isn't implemented \n");
}
#endif // SSE41 or AVX
// Quantized (int8) convolutional forward pass:
//   0. quantize the float input with l.input_quant_multipler (saturating to I_MAX_VAL)
//   1. im2col + per-filter int8 GEMM into an int16 buffer (OpenMP over filters)
//   2. dequantize with ALPHA1 = R_MULT / (input_mult * weights_mult)
//   3. add float biases, 4. apply leaky activation
// Mirrors cudnnConvolutionBiasActivationForward(): y = act(alpha1*conv(x) + bias)
void forward_convolutional_layer_q(layer l, network_state state)
{
    int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
    int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
    int i, f, j;  // NOTE(review): f is unused
    int const out_size = out_h*out_w;
    size_t const weights_size = l.size*l.size*l.c*l.n;  // NOTE(review): unused
    // l.n - number of filters on this layer
    // l.c - channels of input-array
    // l.h - height of input-array
    // l.w - width of input-array
    // l.size - width and height of filters (the same size for all filters)
    typedef int16_t conv_t; // accumulator type of the GEMM output
    conv_t *output_q = calloc(l.outputs, sizeof(conv_t));
    // NOTE(review): allocated with sizeof(int) and cast to (int *) although the
    // buffer is written as int8 values below -- over-allocates; confirm the
    // declared type of state.input_int8
    state.input_int8 = (int *)calloc(l.inputs, sizeof(int));
    int z;
    // 0. quantize the float input: int8 = saturate(float * input_quant_multipler)
    for (z = 0; z < l.inputs; ++z) {
        int16_t src = state.input[z] * l.input_quant_multipler;
        state.input_int8[z] = max_abs(src, I_MAX_VAL);
    }
    // 1. Convolution as GEMM
    int fil;
    int m = l.n;               // rows of A = number of filters
    int k = l.size*l.size*l.c; // columns of A = weights per filter
    int n = out_h*out_w;       // columns of B = output pixels
    int8_t *a = l.weights_int8;
    int8_t *b = (int8_t *)state.workspace;
    conv_t *c = output_q; // int16_t
    im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b);
    int t; // multi-thread gemm: one 1-row GEMM per filter
#pragma omp parallel for
    for (t = 0; t < m; ++t) {
        gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
    }
    free(state.input_int8);
    // 2. dequantize: cuDNN y = alpha1 * conv(x)
    float ALPHA1 = R_MULT / (l.input_quant_multipler * l.weights_quant_multipler);
    for (i = 0; i < l.outputs; ++i) {
        l.output[i] = output_q[i] * ALPHA1; // cuDNN: alpha1
    }
    // 3. cuDNN: y = alpha1 * conv(x) + bias (bias is always FLOAT)
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            l.output[fil*out_size + j] += l.biases[fil];
        }
    }
    // 4. cuDNN: y = act ( alpha1 * conv(x) + bias )
    if (l.activation == LEAKY) {
        for (i = 0; i < l.n*out_size; ++i) {
            l.output[i] = (l.output[i]>0) ? l.output[i] : l.output[i] / 10; // leaky_activate
        }
    }
    free(output_q);
}
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// Older fully-quantized convolutional forward pass. Input is already int8
// (state.input_int8); the conv result is kept int16, rescaled by the
// quantized l.output_multipler, gets quantized biases and leaky activation,
// and is emitted either as float (return_float != 0) or saturated int8.
// Mirrors cudnnConvolutionBiasActivationForward(): y = act(alpha1*conv(x) + bias)
void forward_convolutional_layer_q_old(layer l, network_state state, int return_float)
{
    int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
    int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
    int i, f, j;  // NOTE(review): f is unused
    int const out_size = out_h*out_w;
    size_t const weights_size = l.size*l.size*l.c*l.n;  // NOTE(review): unused
    typedef int16_t conv_t; // accumulator type of the convolution result
    conv_t *output_q = calloc(l.outputs, sizeof(conv_t));
    // 1. Convolution
#ifndef GEMMCONV
    int fil;
    // direct convolution, parallelized over filters
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < l.n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < l.c; ++chan)
            // input - y
            for (y = 0; y < l.h; ++y)
                // input - x
                for (x = 0; x < l.w; ++x)
                {
                    int const output_index = fil*l.w*l.h + y*l.w + x;
                    int const weights_pre_index = fil*l.c*l.size*l.size + chan*l.size*l.size;
                    int const input_pre_index = chan*l.w*l.h;
                    int32_t sum = 0;
                    // filter - y
                    for (f_y = 0; f_y < l.size; ++f_y)
                    {
                        int input_y = y + f_y - l.pad;
                        // filter - x
                        for (f_x = 0; f_x < l.size; ++f_x)
                        {
                            int input_x = x + f_x - l.pad;
                            // zero padding: skip out-of-bounds taps
                            if (input_y < 0 || input_x < 0 || input_y >= l.h || input_x >= l.w) continue;
                            int input_index = input_pre_index + input_y*l.w + input_x;
                            int weights_index = weights_pre_index + f_y*l.size + f_x;
                            // int32 += int8 * int8
                            sum += (int32_t)state.input_int8[input_index] * (int32_t)l.weights_int8[weights_index];
                        }
                    }
                    // l.output[filters][height][width] +=
                    //     state.input[channels][height][width] *
                    //     l.weights[filters][channels][filter_height][filter_width]
                    // rescaled by R_MULT and saturated into the int16 accumulator
                    output_q[output_index] += max_abs(sum / R_MULT, R_MAX_VAL);
                }
    }
#else
    int fil;
    // convolution as GEMM: y = conv(x)
    int m = l.n;               // rows of A = number of filters
    int k = l.size*l.size*l.c; // columns of A = weights per filter
    int n = out_h*out_w;       // columns of B = output pixels
    int8_t *a = l.weights_int8;
    int8_t *b = (int8_t *)state.workspace;
    conv_t *c = output_q; // int16_t
    im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b);
    int t; // multi-thread gemm: one 1-row GEMM per filter
#pragma omp parallel for
    for (t = 0; t < m; ++t) {
        gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
    }
#endif
    // 2. cuDNN: y = alpha1 * conv(x) -- rescale with the quantized multiplier
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            output_q[fil*out_size + j] = output_q[fil*out_size + j] * l.output_multipler;
        }
    }
    // 3. cuDNN: y = alpha1 * conv(x) + bias (quantized biases)
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            output_q[fil*out_size + j] += l.biases_quant[fil];
        }
    }
    // 4. leaky activation in integer arithmetic (x/10 instead of 0.1*x)
    if (l.activation == LEAKY) {
        for (i = 0; i < l.n*out_size; ++i) {
            output_q[i] = (output_q[i]>0) ? output_q[i] : output_q[i] / 10; // leaky_activate
        }
    }
    // 5. emit the result
    if (return_float) {
        // y - FLOAT, x,w - X_INT8 / X_INT8x4
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (float)output_q[i] / 16.F; // NOTE(review): empirical scale -- confirm
        }
    }
    else
    {
        // y - X_INT8 / X_INT8x4, x,w - X_INT8 / X_INT8x4
        for (i = 0; i < l.outputs; ++i) {
            l.output_int8[i] = max_abs(output_q[i], I_MAX_VAL); // saturate to int8
        }
    }
    free(output_q);
}
#define MIN_INT8 -128
// MAX pooling layer
// int8 forward pass of a MAX-pooling layer: for every output cell take the
// maximum int8 value inside the l.size x l.size pooling window (out-of-bounds
// window cells count as MIN_INT8) and remember the winner's input index in
// l.indexes (used by the backward pass in training builds).
void forward_maxpool_layer_q(const layer l, network_state state)
{
    int b, i, j, k, m, n;
    int w_offset = -l.pad;
    int h_offset = -l.pad;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    // batch index
    for (b = 0; b < l.batch; ++b) {
        // channel index
        for (k = 0; k < c; ++k) {
            // y - output
            for (i = 0; i < h; ++i) {
                // x - output
                for (j = 0; j < w; ++j) {
                    int out_index = j + w*(i + h*(k + c*b));
                    int8_t max = MIN_INT8;
                    int max_i = -1;  // input index of the winning element
                    // pooling y-index
                    for (n = 0; n < l.size; ++n) {
                        // pooling x-index
                        for (m = 0; m < l.size; ++m) {
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                cur_w >= 0 && cur_w < l.w);
                            // padding cells contribute MIN_INT8, never win unless all do
                            int8_t val = (valid != 0) ? state.input_int8[index] : MIN_INT8;
                            max_i = (val > max) ? index : max_i; // get max index
                            max = (val > max) ? val : max; // get max value
                        }
                    }
                    l.output_int8[out_index] = max; // store max value
                    l.indexes[out_index] = max_i; // store max index
                }
            }
        }
    }
}
// Route layer - just copy 1 or more layers into the current layer
// Route layer (int8): concatenate the int8 outputs of one or more source
// layers into this layer's output buffer, independently for each batch item.
void forward_route_layer_q(const layer l, network_state state)
{
    int dst_offset = 0;
    // l.n = number of merged source layers
    for (int src = 0; src < l.n; ++src) {
        const int src_layer = l.input_layers[src];           // source layer index
        int8_t *src_out = state.net.layers[src_layer].output_int8;
        const int src_size = l.input_sizes[src];             // source layer size
        for (int b = 0; b < l.batch; ++b) {
            int8_t *dst = l.output_int8 + dst_offset + b * l.outputs;
            memcpy(dst, src_out + b * src_size, src_size * sizeof(int8_t));
        }
        dst_offset += src_size;
    }
}
// Reorg layer - just change dimension sizes of the previous layer (some dimension sizes are increased by decreasing other)
// Reorg layer (int8): rearranges an (in_c, out_h*stride, out_w*stride) input
// into an (out_c, out_h, out_w) output, trading spatial resolution for
// channels (out_c = in_c * stride * stride). Pure index shuffling, no math.
void forward_reorg_layer_q(const layer l, network_state state)
{
    int8_t *out = l.output_int8;
    int8_t *x = state.input_int8;
    int out_w = l.out_w;
    int out_h = l.out_h;
    int out_c = l.out_c;
    int batch = l.batch;
    int stride = l.stride;
    int b, i, j, k;
    int in_c = out_c / (stride*stride);  // channels of the source layout
    int out_w_X_stride = out_w*stride;   // source width
    int out_h_X_stride = out_h*stride;   // source height
    // batch
    for (b = 0; b < batch; ++b) {
        // output channel
        for (k = 0; k < out_c; ++k) {
            int c2 = k % in_c;           // source channel
            int pre_out_index = out_h_X_stride*(c2 + in_c*b);
            // which of the stride*stride spatial sub-positions this channel maps to
            int offset = k / in_c;
            int offset_mod_stride = offset % stride;
            int offset_div_stride = offset / stride;
            // y
            for (j = 0; j < out_h; ++j) {
                int pre_in_index = out_w*(j + out_h*(k + out_c*b));
                // x
                for (i = 0; i < out_w; ++i) {
                    int in_index = i + pre_in_index;                          // destination index
                    int w2 = i*stride + offset_mod_stride;                    // source x
                    int h2 = j*stride + offset_div_stride;                    // source y
                    int out_index = w2 + out_w_X_stride*(h2 + pre_out_index); // source index
                    out[in_index] = x[out_index];
                }
            }
        }
    }
}
// ---- region layer ----
static void softmax_q(float *input, int n, float temp, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for (i = 0; i < n; ++i) {
if (input[i] > largest) largest = input[i];
}
for (i = 0; i < n; ++i) {
float e = expf(input[i] / temp - largest / temp);
sum += e;
output[i] = e;
}
for (i = 0; i < n; ++i) {
output[i] /= sum;
}
}
// Apply softmax_q independently to each group of a hierarchical softmax tree.
// Groups are laid out consecutively; `inputs` is the per-batch-item stride.
static void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
    for (int b = 0; b < batch; ++b) {
        int offset = 0;
        for (int g = 0; g < hierarchy->groups; ++g) {
            int gsz = hierarchy->group_size[g];
            float *src = input + b*inputs + offset;
            float *dst = output + b*inputs + offset;
            softmax_q(src, gsz, temp, dst);
            offset += gsz;
        }
    }
}
// ---
// Region layer - just change places of array items, then do logistic_activate and softmax
// Region layer: copy the input, transpose it from channel-major to cell-major
// layout, apply the logistic activation to each anchor's objectness score t0,
// then softmax the class scores (tree softmax for YOLO9000, flat for YOLOv2).
void forward_region_layer_q(const layer l, network_state state)
{
    int i, b;
    int size = l.coords + l.classes + 1; // 4 Coords(x,y,w,h) + Classes + 1 Probability-t0
    memcpy(l.output, state.input, l.outputs*l.batch * sizeof(float));
    // convert many channels to the one channel (depth=1): after the transpose
    // each grid cell holds all size*l.n prediction values contiguously
    {
        float *x = l.output;
        int layer_size = l.w*l.h; // W x H - size of layer
        int layers = size*l.n;    // number of channels (l.n = number of anchors)
        int batch = l.batch;
        float *swap = calloc(layer_size*layers*batch, sizeof(float)); // NOTE(review): calloc unchecked
        int i, c, b;
        // batch index
        for (b = 0; b < batch; ++b) {
            // channel index
            for (c = 0; c < layers; ++c) {
                // layer grid index
                for (i = 0; i < layer_size; ++i) {
                    int i1 = b*layers*layer_size + c*layer_size + i;
                    int i2 = b*layers*layer_size + i*layers + c;
                    swap[i2] = x[i1]; // transpose (channel, cell) -> (cell, channel)
                }
            }
        }
        memcpy(x, swap, layer_size*layers*batch * sizeof(float));
        free(swap);
    }
    // logistic activation only for t0 (t0 = Probability * IoU(box, object))
    for (b = 0; b < l.batch; ++b) {
        // for each item (x, y, anchor-index)
        for (i = 0; i < l.h*l.w*l.n; ++i) {
            int index = size*i + b*l.outputs;
            float x = l.output[index + 4];
            l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate
        }
    }
    if (l.softmax_tree) { // Yolo 9000
        for (b = 0; b < l.batch; ++b) {
            for (i = 0; i < l.h*l.w*l.n; ++i) {
                int index = size*i + b*l.outputs;
                // NOTE(review): inputs argument is 0; harmless because batch=1
                // here so the b*inputs offset is never taken -- confirm
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            }
        }
    }
    else if (l.softmax) { // Yolo v2
        // softmax activation only for the class probabilities
        for (b = 0; b < l.batch; ++b) {
            // for each item (x, y, anchor-index)
            for (i = 0; i < l.h*l.w*l.n; ++i) {
                int index = size*i + b*l.outputs;
                softmax_q(l.output + index + 5, l.classes, 1, l.output + index + 5);
            }
        }
    }
}
// Forward-pass dispatcher for the quantized network: CONVOLUTIONAL layers run
// the int8 path (forward_convolutional_layer_q) except the first layer and
// layers with LINEAR activation, which stay in float; all other layer types
// use the regular float CPU implementations.
void yolov2_forward_network_q(network net, network_state state)
{
    state.workspace = net.workspace;
    int i, k;  // NOTE(review): k is unused
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        if (l.type == CONVOLUTIONAL) {
            // first layer and LINEAR-activation layers stay in float for accuracy
            if (i >= 1 && l.activation != LINEAR) forward_convolutional_layer_q(l, state);
            else forward_convolutional_layer_cpu(l, state);
            printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", i, l.size);
        }
        else if (l.type == MAXPOOL) {
            forward_maxpool_layer_cpu(l, state);
        }
        else if (l.type == ROUTE) {
            forward_route_layer_cpu(l, state);
        }
        else if (l.type == REORG) {
            forward_reorg_layer_cpu(l, state);
        }
        else if (l.type == UPSAMPLE) {
            forward_upsample_layer_cpu(l, state);
        }
        else if (l.type == SHORTCUT) {
            forward_shortcut_layer_cpu(l, state);
        }
        else if (l.type == YOLO) {
            forward_yolo_layer_cpu(l, state);
        }
        else if (l.type == REGION) {
            forward_region_layer_cpu(l, state);
        }
        else {
            printf("\n layer: %d \n", l.type);  // unknown layer type
        }
        // output of this layer becomes input of the next
        state.input = l.output;
    }
}
// Older fully-int8 forward dispatcher: CONVOLUTIONAL, MAXPOOL, ROUTE, REORG
// and REGION run their *_q int8 implementations; after layer 0 (which runs in
// float) the output is re-quantized to int8 with a hard-coded multiplier.
void yolov2_forward_network_q_old(network net, network_state state)
{
    state.workspace = net.workspace;
    int i, k;  // NOTE(review): this k is unused (shadowed by the inner k below)
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        if (l.type == CONVOLUTIONAL) {
            // if the next layer has LINEAR activation, the conv must return float
            // NOTE(review): net.layers[i+1] reads one past the array when i is the
            // last layer -- confirm the last layer is never CONVOLUTIONAL here
            int return_float = (net.layers[i+1].activation == LINEAR);
            if (i >= 1 && l.activation != LINEAR) forward_convolutional_layer_q_old(l, state, return_float);
            else forward_convolutional_layer_cpu(l, state);
            printf("\n %d - CONVOLUTIONAL \t\t l.size = %d \n", i, l.size);
        }
        else if (l.type == MAXPOOL) {
            forward_maxpool_layer_q(l, state);
        }
        else if (l.type == ROUTE) {
            forward_route_layer_q(l, state);
        }
        else if (l.type == REORG) {
            forward_reorg_layer_q(l, state);
        }
        else if (l.type == REGION) {
            forward_region_layer_q(l, state);
        }
        else {
            printf("\n layer: %d \n", l.type);  // unknown/unsupported layer type
        }
        // both float and int8 outputs feed the next layer
        state.input = l.output;
        state.input_int8 = l.output_int8;
        if (i == 0) {
            // layer 0 ran in float: quantize its output to int8 for layer 1
            int k;
            for (k = 0; k < l.out_w*l.out_h*l.out_c; ++k) {
                // NOTE(review): 3.88677 is a hard-coded input_quant_multipler of
                // the next quantized layer -- confirm it matches the loaded cfg
                int16_t src = state.input[k] * 3.88677;
                state.input_int8[k] = max_abs(src, I_MAX_VAL);
            }
        }
    }
}
// detect on CPU
// Run a quantized forward pass on the CPU and return a pointer to the output
// of the last non-COST layer. Input quantization happens inside the first
// quantized convolutional layer.
float *network_predict_quantized(network net, float *input)
{
    network_state state;
    state.net = net;
    state.index = 0;
    state.input = input;
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    yolov2_forward_network_q(net, state); // network on CPU
    // walk back over trailing COST layers to find the real output layer
    int last = net.n - 1;
    while (last > 0 && net.layers[last].type == COST) {
        --last;
    }
    return net.layers[last].output;
}
// detect on CPU
// Run the older fully-int8 forward pass on the CPU: quantize the float input
// with the first layer's multiplier, forward the network, and return a
// pointer to the output of the last non-COST layer.
float *network_predict_quantized_old(network net, float *input)
{
    network_state state;
    state.net = net;
    state.index = 0;
    state.input = input;
    state.input_int8 = calloc(net.w*net.h*net.c, sizeof(int8_t));
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    // quantize: int8 = saturate(float * input_quant_multipler)
    const int in_count = net.w * net.h * net.c;
    for (int k = 0; k < in_count; ++k) {
        int16_t q = state.input[k] * net.layers[0].input_quant_multipler;
        state.input_int8[k] = max_abs(q, I_MAX_VAL);
    }
    yolov2_forward_network_q_old(net, state); // network on CPU
    // walk back over trailing COST layers to find the real output layer
    int last = net.n - 1;
    while (last > 0 && net.layers[last].type == COST) {
        --last;
    }
    free(state.input_int8);
    return net.layers[last].output;
}
// --------------------
// x - last conv-layer output
// biases - anchors from cfg-file
// n - number of anchors from cfg-file
// Decode one predicted box from raw region-layer outputs.
// x      - last conv-layer output, index - offset of this box's 4 coords
// biases - anchor sizes from the cfg, n - anchor index
// (i, j) - grid cell column/row, (w, h) - grid width/height
// Returned coordinates are normalized to [0, 1] relative to the grid.
box get_region_box_q(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    const float *p = x + index;
    box result;
    result.x = (i + logistic_activate(p[0])) / w;  // cell col + sigmoid offset
    result.y = (j + logistic_activate(p[1])) / h;  // cell row + sigmoid offset
    result.w = expf(p[2]) * biases[2 * n] / w;     // exp(tw) * anchor width
    result.h = expf(p[3]) * biases[2 * n + 1] / h; // exp(th) * anchor height
    return result;
}
// get prediction boxes
// Convert raw region-layer predictions into boxes (scaled to w x h pixels)
// and per-class probabilities: probs[index][j] = t0 * P(class j) when above
// `thresh`, else 0. Handles both flat YOLOv2 classes and the YOLO9000 tree.
void get_region_boxes_q(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i, j, n;
    float *predictions = l.output;
    // grid index
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        // anchor index
        for (n = 0; n < l.n; ++n) {
            int index = i*l.n + n; // index for each grid-cell & anchor
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index]; // scale = t0 = Probability * IoU(box, object)
            if (l.classfix == -1 && scale < .5) scale = 0; // if(t0 < 0.5) t0 = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box_q(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            // rescale the normalized box to the requested output resolution
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;
            int class_index = index * (l.classes + 5) + 5;
            if (l.softmax_tree) {
                // Yolo 9000: propagate probabilities down the class hierarchy
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if (map) {
                    // remap to the detection subset (200 classes)
                    for (j = 0; j < 200; ++j) {
                        float prob = scale*predictions[class_index + map[j]];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                }
                else {
                    // keep only the deepest class whose confidence exceeds 0.5
                    for (j = l.classes - 1; j >= 0; --j) {
                        if (!found && predictions[class_index + j] > .5) {
                            found = 1;
                        }
                        else {
                            predictions[class_index + j] = 0;
                        }
                        float prob = predictions[class_index + j];
                        probs[index][j] = (scale > thresh) ? prob : 0;
                    }
                }
            }
            else
            {
                // Yolo v2
                for (j = 0; j < l.classes; ++j) {
                    float prob = scale*predictions[class_index + j]; // t0 * class-probability
                    probs[index][j] = (prob > thresh) ? prob : 0;    // threshold low-confidence
                }
            }
            if (only_objectness) {
                probs[index][0] = scale;
            }
        }
    }
}
float entropy_calibration(float *src_arr, const size_t size, const float bin_width, const int max_bin)
{
//const float bin_width = 1.0 / 4096;// 1.0F / 64.0F;
//const int max_bin = 2048*2;// 2048;
const int max_global_val = max_bin * bin_width; // 1024 // 32
float *m_array = (float*)calloc(max_bin, sizeof(float));
float *H_histogram = (float*)calloc(max_bin, sizeof(float));
float *P_array = (float*)calloc(max_bin, sizeof(float));
float *Q_array = (float*)calloc(max_bin, sizeof(float));
float *quant_Q_array = (float*)calloc(128, sizeof(float)); // 128 for INT8
uint64_t *quant_Q_array_count = (uint64_t*)calloc(128, sizeof(uint64_t)); // 128 for INT8
int i, j;
{
//uint64_t outliers = 0;
const int last_bin = max_bin - 1;
for (j = 0; j <= last_bin; ++j) P_array[j] = 0;
for (j = 0; j < size; ++j) {
int bin_num = lround(fabs(src_arr[j]) / bin_width);
int bin_num_saturated = (bin_num >= last_bin) ? last_bin : bin_num;
H_histogram[bin_num_saturated]++;
//if (bin_num > last_bin) outliers++;
//else H_histogram[bin_num]++;
}
}
for (i = 128; i < max_bin; ++i) { // [1/64; 1024] // [1/64; 32]
//if (i > max_bin) printf(" i > max_bin = %d, ", i);
//printf(" %d \r", i);
// calculate bin histogram
uint64_t outliers = 0;
const int last_bin = i - 1;
for (j = 0; j <= last_bin; ++j) P_array[j] = 0;
/*for (j = 0; j < size; ++j) {
int bin_num = lround(fabs(src_arr[j]) / bin_width);
//int bin_num_saturated = (bin_num >= last_bin) ? last_bin : bin_num;
if (bin_num > last_bin) outliers++;
else P_array[bin_num]++;
}*/
for (j = 0; j < max_bin; ++j) {
if (j <= last_bin) P_array[j] = H_histogram[j];
else outliers += H_histogram[j];
}
// quantinization P-i-bins to Q-128-bins
const float quant_expand_width = i / 128.0F;
for (j = 0; j < 128; ++j) quant_Q_array[j] = 0, quant_Q_array_count[j] = 0;
for (j = 0; j < i; ++j) {
int quant_bin = lround(j / quant_expand_width);
if (quant_bin > 127) quant_bin = 127; // printf(" quant_bin > 127 = %d \n", quant_bin);
quant_Q_array[quant_bin] += P_array[j];
if (P_array[j] != 0) quant_Q_array_count[quant_bin]++;
}
// expand 128-bins to i-bins
for (j = 0; j < i; ++j) Q_array[j] = 0;
for (j = 0; j < i; ++j) {
int quant_bin = lround(j / quant_expand_width);
if (quant_bin > 127) quant_bin = 127;// printf(" quant_bin > 127 = %d \n", quant_bin);
//Q_array[j] = llround(quant_Q_array[quant_bin] / quant_expand_width);
if (P_array[j] != 0) // preserve empty bins from original P
Q_array[j] = quant_Q_array[quant_bin] / quant_Q_array_count[quant_bin];
//printf(" quant_bin = %d, Q[j] = %f = q_Q %f / q_w %f, P = %f \n", quant_bin, Q_array[j], quant_Q_array[quant_bin], quant_expand_width, P_array[j]);
}
P_array[last_bin] += outliers; // saturation
// P /= SUM(P); Q /= SUM(Q);
float sum_P = 0, sum_Q = 0, quant_sum_Q = 0;
for (j = 0; j < 128; ++j) quant_sum_Q += quant_Q_array[j];
for (j = 0; j < i; ++j) {
sum_P += P_array[j];
sum_Q += Q_array[j];
//printf(" P_array = %f, Q_array = %f \n", P_array[j], Q_array[j]);
}
for (j = 0; j < i; ++j) {
P_array[j] /= sum_P;
Q_array[j] /= sum_Q;
}
// KL_divergence(P, Q);
for (j = 0; j < i; ++j) {
m_array[i] += P_array[j] * (log((P_array[j] + FLT_MIN) / (Q_array[j] + FLT_MIN)));
//printf(" p = %f, q = %f, p/q = %f, log(p/q) = %f, m = %f \n", P_array[j], Q_array[j], P_array[j] / Q_array[j], log((P_array[j] + FLT_MIN) / (Q_array[j] + FLT_MIN)), m_array[i]);
}
//printf("\n i = %d, size = %zu, sum_P = %f, sum_Q = %f, q_sum_Q = %f, q_e_width = %f, m = %f \n", i, size, sum_P, sum_Q, quant_sum_Q, quant_expand_width, m_array[i]);
//getchar();
}
float m_index = 128, min_m = FLT_MAX;
for (i = 128; i < max_bin; ++i) {
if (m_array[i] < min_m) {
min_m = m_array[i];
m_index = i;
}
}
float threshold = (m_index + 0.5) * bin_width;
float multiplier = 127 / threshold;
printf(" mult = %g, threshold = %g, min_m = %g, m_index = %g \n", multiplier, threshold, min_m, m_index);
free(H_histogram);
free(P_array);
free(Q_array);
free(quant_Q_array);
free(quant_Q_array_count);
free(m_array);
//getchar();
return multiplier;
}
// Quantinization and get multiplers for convolutional weights for quantinization:
// derive an int8 scaling multiplier for every convolutional layer's weights,
// pick the layer's input multiplier from the cfg calibration table (default 40),
// compute the output multiplier that rescales the int32 accumulator into the
// next layer's int8 input range, and fill the int8 weight / quantized bias buffers.
void quantinization_and_get_multipliers(network net)
{
    // ----------- entropy_calibration(,, 1.0 / 16, 4096); - FULL ----------------------
    int counter = 0;  // number of convolutional layers seen; indexes net.input_calibration[]
    int j;
    for (j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];

        if (l->type == CONVOLUTIONAL) {
            size_t const weights_size = l->size*l->size*l->c*l->n;
            size_t const filter_size = l->size*l->size*l->c;
            int i, fil;

            // get optimal multipliers - for Weights
            float old_weight_mult = get_multiplier(l->weights, weights_size, 8) / 4;   // good [2 - 8], best 4
            float weights_multiplier_single = old_weight_mult;
            printf(" old_weight_mult = %f, weights_multiplier_single = %f \n\n", old_weight_mult, weights_multiplier_single);

            l->weights_quant_multipler = weights_multiplier_single;

            // quantize weights: w_int8 = clamp(w * weights_quant_multipler, +/-W_MAX_VAL)
            for (fil = 0; fil < l->n; ++fil) {
                for (i = 0; i < filter_size; ++i) {
                    float w = l->weights[fil*filter_size + i] * l->weights_quant_multipler;// [fil];
                    l->weights_int8[fil*filter_size + i] = max_abs(w, W_MAX_VAL);
                }
            }

            if (counter >= net.input_calibration_size) {
                printf("\n Warning: input_calibration= in the cfg-file has less values %d than convolutional layers %d \n",
                    net.input_calibration_size, counter);
            }

            // input multiplier for THIS layer: calibration value if present, else 40
            l->input_quant_multipler = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 40;

            ++counter;

            // input multiplier of the NEXT conv layer - the output multiplier must
            // bring this layer's accumulator into that layer's int8 input scale
            float current_input_mult = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 40;

            // BUGFIX: the original if(counter==1)/if(counter==2)/else if(counter>=2)
            // chain assigned this identical expression in every reachable branch
            // (counter is always >= 1 here), once per filter, into a scalar field.
            // Collapsed to a single, behavior-identical assignment.
            l->output_multipler = current_input_mult / (l->weights_quant_multipler * l->input_quant_multipler / R_MULT);

            // quantinization Biases
            for (fil = 0; fil < l->n; ++fil) {
                // calculate optimal multipliers - for Biases
                float biases_multipler = (l->output_multipler * l->weights_quant_multipler * l->input_quant_multipler / R_MULT);
                l->biases_quant[fil] = l->biases[fil] * biases_multipler;
            }

            printf(" Multiplers: weights %g, input %g, output %g \n",
                l->weights_quant_multipler, l->input_quant_multipler, l->output_multipler);
        }
        else {
            printf(" Skip layer: %d \n", l->type);
        }
    }

#ifdef GPU
    // init weights and cuDNN for quantized IINT8x4
    init_gpu_int8x4(net);
#endif //GPU
}
|
terrain.c | #include "blocko.h"
float hmap[TILESW][TILESD];
float hmap2[TILESW][TILESD];
int tscootx, tscootz, tchunk_scootx, tchunk_scootz;
// Recursive midpoint-displacement over the global hmap[][]: fills any unset
// (== 0) heights inside the square (x0..x2) x (z0..z2). Corners are seeded
// randomly in [64,127]; edge midpoints and the center are averaged from their
// neighbors with random jitter that shrinks as the square gets smaller.
// NOTE(review): RANDI/RANDF/SEED4 appear to be macros that read/update the
// local `seed` variable by name - confirm before renaming or removing it.
void gen_hmap(int x0, int x2, int z0, int z2)
{
unsigned seed = SEED4(x0, x2, z0, z2);
// pick corners if they aren't set
if (hmap[x0][z0] == 0) hmap[x0][z0] = RANDI(64, 127);
if (hmap[x0][z2] == 0) hmap[x0][z2] = RANDI(64, 127);
if (hmap[x2][z0] == 0) hmap[x2][z0] = RANDI(64, 127);
if (hmap[x2][z2] == 0) hmap[x2][z2] = RANDI(64, 127);
// midpoints of the square, and quarter-spans used to scale the jitter
int x1 = (x0 + x2) / 2;
int z1 = (z0 + z2) / 2;
int w = (x2 - x0) / 4;
int d = (z2 - z0) / 4;
w = w ? w : 1;
d = d ? d : 1;
float d2 = d / 2.f;
// r gates the jitter: only squares wider than 2 quarter-tiles get randomness
float r = w > 2 ? 1.f : 0.f;
// edges middles
if (!hmap[x0][z1])
hmap[x0][z1] = (hmap[x0][z0] + hmap[x0][z2]) / 2.f + r * RANDF(-d2, d2);
if (!hmap[x2][z1])
hmap[x2][z1] = (hmap[x2][z0] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);
if (!hmap[x1][z0])
hmap[x1][z0] = (hmap[x0][z0] + hmap[x2][z0]) / 2.f + r * RANDF(-d2, d2);
if (!hmap[x1][z2])
hmap[x1][z2] = (hmap[x0][z2] + hmap[x2][z2]) / 2.f + r * RANDF(-d2, d2);
// middle middle (always overwritten, with double the edge jitter)
hmap[x1][z1] = (hmap[x0][z1] + hmap[x2][z1] + hmap[x1][z0] + hmap[x1][z2]) / 4.f + r * RANDF(-d, d);
// recurse if there are any unfilled spots
if(x1 - x0 > 1 || x2 - x1 > 1 || z1 - z0 > 1 || z2 - z1 > 1)
{
gen_hmap(x0, x1, z0, z1);
gen_hmap(x0, x1, z1, z2);
gen_hmap(x1, x2, z0, z1);
gen_hmap(x1, x2, z1, z2);
}
}
// Post-process the raw heightmap: box-blur hmap[][] into hmap2[][] with a
// noise-chosen radius, then flatten beaches, and raise plateaus (height 102,
// or 101 inside "swoosh" bands). Reads hmap, writes hmap2 only.
void smooth_hmap()
{
for (int x = 0; x < TILESW; x++) for (int z = 0; z < TILESD; z++)
{
// blur radius 1..3 chosen by low-frequency noise (rougher areas blur more)
float p365 = noise(x, 0, -z, 365);
int radius = p365 < 0.0f ? 3 :
p365 < 0.2f ? 2 : 1;
int x0 = x - radius;
int x1 = x + radius + 1;
int z0 = z - radius;
int z1 = z + radius + 1;
CLAMP(x0, 0, TILESW-1);
CLAMP(x1, 0, TILESW-1);
CLAMP(z0, 0, TILESD-1);
CLAMP(z1, 0, TILESD-1);
// box average of the neighborhood
int sum = 0, n = 0;
for (int i = x0; i < x1; i++) for (int j = z0; j < z1; j++)
{
sum += hmap[i][j];
n++;
}
int res = sum / n;
float p800 = noise(x, 0, z, 800);
float p777 = noise(z, 0, x, 777);
float p301 = noise(x, 0, z, 301);
float p204 = noise(x, 0, z, 204);
float p33 = noise(x, 0, z, 32 * (1.1 + p301));
float swoosh = p33 > 0.3 ? (10 - 30 * (p33 - 0.3)) : 0;
// beach line height varies per-column, clamped to [90,100]
float times = (p204 * 20.f) + 30.f;
float plus = (-p204 * 40.f) + 60.f;
CLAMP(times, 20.f, 40.f);
CLAMP(plus, 40.f, 80.f);
int beach_ht = (1.f - p777) * times + plus;
CLAMP(beach_ht, 90, 100);
if (res > beach_ht) // beaches
{
// compress terrain just above the beach line (gentler slope near water)
if (res > beach_ht + 21) res -= 18;
else res = ((res - beach_ht) / 7) + beach_ht;
}
// plateau: lerp terrain up toward height 102 where p800 exceeds a noisy threshold
float s = (1 + p204) * 0.2;
if (p800 > 0.0 + s)
{
float t = (p800 - 0.0 - s) * 10;
CLAMP(t, 0.f, 1.f);
res = lerp(t, res, 102);
if (res == 102 && swoosh) res = 101;
}
hmap2[x][z] = res < TILESH - 1 ? res : TILESH - 1;
}
}
// Build the whole heightmap: run midpoint displacement over an 8x8 grid of
// patches, then smooth the result into hmap2[][].
void create_hmap()
{
    const int patches = 8;
    for (int px = 0; px < patches; px++)
    {
        for (int pz = 0; pz < patches; pz++)
        {
            int xa = px * TILESW / patches;
            int xb = (px + 1) * TILESW / patches;
            int za = pz * TILESD / patches;
            int zb = (pz + 1) * TILESD / patches;
            CLAMP(xb, 0, TILESW-1);
            CLAMP(zb, 0, TILESD-1);
            gen_hmap(xa, xb, za, zb);
        }
    }
    smooth_hmap();
}
void gen_chunk(int xlo, int xhi, int zlo, int zhi)
{
CLAMP(xlo, 0, TILESW-1);
CLAMP(xhi, 0, TILESW-1);
CLAMP(zlo, 0, TILESD-1);
CLAMP(zhi, 0, TILESD-1);
static char column_already_generated[TILESW][TILESD];
int x;
#pragma omp parallel for
for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++)
{
if (x == xlo && z == zlo)
omp_threads = omp_get_num_threads();
if (column_already_generated[x][z])
continue;
column_already_generated[x][z] = true;
float p1080 = noise(x, 0, -z, 1080);
float p530 = noise(z, 0, x, 530);
float p630 = noise(-z, 0, x, 629);
float p200 = noise(x, 0, z, 200);
float p80 = noise(x, 0, z, 80);
float p15 = noise(z, 0, -x, 15);
//float p5 = noise(-x, 0, z, 5);
if (p200 > 0.2f)
{
float flatten = (p200 - 0.2f) * 80;
CLAMP(flatten, 1, 12);
hmap2[x][z] -= 100;
hmap2[x][z] /= flatten;
hmap2[x][z] += 100;
}
int solid_depth = 0;
int slicey_bit = false;
int plateau_bit = false;
int mode = p1080 > 0 ? 1 : 10;
for (int y = 0; y < TILESH; y++)
{
if (y == TILESH - 1) { TT_(x, y, z) = HARD; continue; }
float p300 = noise(x, y, z, 300);
float p32 = noise(x, y*mode, z, 16 + 16 * (1.1 + p300));
float plat = p32 > 0.3 ? (10 - 30 * (p32 * p32 * p32 - 0.3)) : 0;
float p90 = noise(x, y, z, 90);
float p91 = noise(x+1000, y+1000, z+1000, 91);
float p42 = noise(x, y*(p300 + 1), z, 42);
float p9 = noise(x, y*0.05, z, 9);
float p2 = noise(-z, y, x, 2);
if (p300 + fabsf(p80) * 0.25 + p15 * 0.125 < -0.5) { plat = -plat; }
else if (p300 < 0.5) { plat = 0; }
int cave = (p90 < -0.24 || p91 < -0.24) && (p42 > 0.5 && p9 < 0.4);
if (y > hmap2[x][z] - ((p80 + 1) * 20) && p90 > 0.4 && p91 > 0.4 && p42 > 0.01 && p42 < 0.09 && p300 > 0.3)
slicey_bit = true;
int platted = y < hmap2[x][z] + plat * (mode * 0.125f + 0.875f);
if ((cave || platted) && !plateau_bit)
{
unsigned seed = SEED2(x, z);
if (!slicey_bit || RANDP(5))
{
int type = (y > 100 && hmap2[x][z] > 99) ? WATR : OPEN; //only allow water below low heightmap
TT_(x, y, z) = type;
solid_depth = 0;
slicey_bit = false;
goto out;
}
}
else
{
if (mode == 10 && plat && !cave && y < hmap2[x][z])
plateau_bit = true;
slicey_bit = false;
}
solid_depth++;
float p16 = noise(x, y, z, 16);
int slv = 76 + p530 * 20;
int dlv = 86 + p630 * 20;
int ore = p2 > 0.4f ? ORE : OREH;
int ston = p42 > 0.4f && p9 < -0.3f ? ore : STON;
if (slicey_bit) TT_(x, y, z) = p9 > 0.4f ? HARD : SAND;
else if (solid_depth > 14 + 5 * p9) TT_(x, y, z) = GRAN;
else if (y < slv - 5 * p16) TT_(x, y, z) = ston;
else if (y < dlv - 5 * p16) TT_(x, y, z) = p80 > (-solid_depth * 0.1f) ? DIRT : OPEN; // erosion
else if (y < 100 - 5 * p16) TT_(x, y, z) = solid_depth == 1 ? GRAS : DIRT;
else if (y < 120 ) TT_(x, y, z) = solid_depth < 4 + 5 * p9 ? SAND : ston;
else TT_(x, y, z) = HARD;
out: ;
}
}
// find nearby bezier curvy caves
#define REGW (CHUNKW*16)
#define REGD (CHUNKD*16)
// find region ,-- have to add 1 bc we're overdrawing chunks
// lower bound /
int rxlo = (int)((xlo+1) / REGW) * REGW;
int rzlo = (int)((zlo+1) / REGD) * REGD;
unsigned seed = SEED2(rxlo, rzlo);
// find region center
int rxcenter = rxlo + REGW/2;
int rzcenter = rzlo + REGD/2;
struct point PC = (struct point){rxcenter, TILESH - RANDI(1, 25), rzcenter};
struct point P0;
struct point P1;
struct point P2;
struct point P3 = PC;
int nr_caves = RANDI(0, 100);
// cave system stretchiness
int sx = RANDI(10, 60);
int sy = RANDI(10, 60);
int sz = RANDI(10, 60);
#define MAX_CAVE_POINTS 10000
#define QCAVE(x,y,z,radius_sq) ((struct qcave){x, y, z, radius_sq})
struct qcave cave_points[MAX_CAVE_POINTS];
int cave_p_len = 0;
for (int i = 0; i < nr_caves; i++)
{
// random walk from center of region, or end of last curve
P0 = RANDP(33) ? PC : P3;
P1 = (struct point){P0.x + RANDI(-sx, sx), P0.y + RANDI(-sy, sy), P0.z + RANDI(-sz, sz)};
P2 = (struct point){P1.x + RANDI(-sx, sx), P1.y + RANDI(-sy, sy), P1.z + RANDI(-sz, sz)};
P3 = (struct point){P2.x + RANDI(-sx, sx), P2.y + RANDI(-sy, sy), P2.z + RANDI(-sz, sz)};
float root_radius = 0.f, delta = 0.f;
for (float t = 0.f; t <= 1.f; t += 0.001f)
{
if (cave_p_len >= MAX_CAVE_POINTS) break;
if (root_radius == 0.f || RANDP(0.002f))
{
root_radius = RAND01;
delta = RANDF(-0.001f, 0.001f);
}
root_radius += delta;
float radius_sq = root_radius * root_radius * root_radius * root_radius * 50.f;
CLAMP(radius_sq, 1.f, 50.f);
float s = 1.f - t;
int x = (int)(s*s*s*P0.x + 3.f*t*s*s*P1.x + 3.f*t*t*s*P2.x + t*t*t*P3.x);
int y = (int)(s*s*s*P0.y + 3.f*t*s*s*P1.y + 3.f*t*t*s*P2.y + t*t*t*P3.y);
int z = (int)(s*s*s*P0.z + 3.f*t*s*s*P1.z + 3.f*t*t*s*P2.z + t*t*t*P3.z);
// TODO: don't store duplicate cave points?
if (x >= xlo && x <= xhi && y >= 0 && y <= TILESD - 1 && z >= zlo && z <= zhi)
cave_points[cave_p_len++] = QCAVE(x, y, z, radius_sq);
}
}
// carve caves
#pragma omp parallel for
for (x = xlo; x < xhi; x++) for (int z = zlo; z < zhi; z++) for (int y = 0; y < TILESH-2; y++)
for (int i = 0; i < cave_p_len; i++)
{
int dist_sq = DIST_SQ(cave_points[i].x - x, cave_points[i].y - y, cave_points[i].z - z);
if (dist_sq <= cave_points[i].radius_sq)
{
TT_(x, y, z) = OPEN;
break;
}
}
// correcting pass over middle, contain floating water
#pragma omp parallel for
for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++) for (int y = 100; y < TILESH-2; y++)
{
if (TT_(x, y, z) == WATR)
{
if (TT_(x , y , z-1) == OPEN ||
TT_(x , y , z+1) == OPEN ||
TT_(x-1, y , z ) == OPEN ||
TT_(x+1, y , z ) == OPEN ||
TT_(x , y+1, z ) == OPEN)
TT_(x, y, z) = WOOD;
}
}
// trees?
float p191 = noise(zlo, 0, xlo, 191);
seed = SEED2(xlo, zlo);
if (p191 > 0.2f) while (RANDP(95))
{
char leaves = RANDBOOL ? RLEF : YLEF;
float radius = RANDF(1.f, 4.f);
int x = xlo + CHUNKW/2 + RANDI(-5, 5);
int z = zlo + CHUNKD/2 + RANDI(-5, 5);
for (int y = 10; y < TILESH-2; y++)
{
if (TT_(x, y, z) == OPEN)
continue;
if (TT_(x, y, z) != GRAS && TT_(x, y, z) != DIRT)
break;
int yy = y;
for (; yy >= y - (int)RANDI(3, 8); yy--)
TT_(x, yy, z) = WOOD;
int ymax = yy + RANDI(2, 4);
for (int i = x-3; i <= x+3; i++) for (int j = yy-3; j <= ymax; j++) for (int k = z-3; k <= z+3; k++)
{
float dist = (i-x) * (i-x) + (j-yy) * (j-yy) + (k-z) * (k-z);
if (TT_(i, j, k) == OPEN && dist < radius * radius)
TT_(i, j, k) = leaves;
}
break;
}
}
// cleanup gndheight and set initial lighting
#pragma omp parallel for
for (x = xlo+1; x < xhi-1; x++) for (int z = zlo+1; z < zhi-1; z++)
{
int above_ground = true;
int light_level = 15;
int wet = false;
for (int y = 0; y < TILESH-1; y++)
{
if (above_ground && IS_OPAQUE(x, y, z))
{
TGNDH_(x, z) = y;
above_ground = false;
if (y)
{
TSUN_(x, y-1, z) = 0;
sun_enqueue(x, y-1, z, 0, light_level);
}
light_level = 0;
}
if (wet && TT_(x, y, z) == OPEN)
TT_(x, y, z) = WATR;
if (wet && IS_SOLID(x, y, z))
wet = false;
if (TT_(x, y, z) == WATR)
{
wet = true;
if (light_level) light_level--;
if (light_level) light_level--;
}
TSUN_(x, y, z) = light_level;
}
}
recalc_corner_lighting(xlo, xhi, zlo, zhi);
}
// update terrain worker thread(s) copies of scoot vars
void terrain_apply_scoot()
{
    // Publish the pending scoot offsets into the terrain thread's copies,
    // serialized against other omp critical sections.
    #pragma omp critical
    {
        tchunk_scootx = future_scootx;
        tchunk_scootz = future_scootz;
        tscootx = tchunk_scootx * CHUNKW;
        tscootz = tchunk_scootz * CHUNKD;
    }
}
// on its own thread, loops forever building chunks when needed
void chunk_builder()
{
    for (;;)
    {
        terrain_apply_scoot();

        // player position in chunk coordinates
        int pcx = (player[0].pos.x / BS + CHUNKW2) / CHUNKW;
        int pcz = (player[0].pos.z / BS + CHUNKD2) / CHUNKD;
        CLAMP(pcx, 0, VAOW-1);
        CLAMP(pcz, 0, VAOD-1);

        // scan for the closest chunk that has not been generated yet
        int nearest = 99999999;
        int cx = 0, cz = 0;
        for (int x = 0; x < VAOW; x++)
        {
            for (int z = 0; z < VAOD; z++)
            {
                if (TAGEN_(x, z)) continue;
                int d = (x - pcx) * (x - pcx) + (z - pcz) * (z - pcz);
                if (d < nearest)
                {
                    nearest = d;
                    cx = x;
                    cz = z;
                }
            }
        }

        if (nearest == 99999999)
        {
            // everything is generated; idle briefly before rechecking
            SDL_Delay(1);
            continue;
        }

        int xlo = cx * CHUNKW;
        int zlo = cz * CHUNKD;
        int ticks_before = SDL_GetTicks();
        // overdraw by 1 tile on every side so neighboring chunks blend
        gen_chunk(xlo - 1, xlo + CHUNKW + 1, zlo - 1, zlo + CHUNKD + 1);
        nr_chunks_generated++;
        chunk_gen_ticks += SDL_GetTicks() - ticks_before;
        TAGEN_(cx, cz) = true;

        #pragma omp critical
        {
            just_generated[just_gen_len].x = cx;
            just_generated[just_gen_len].z = cz;
            just_gen_len++;
        }
    }
}
|
declare_variant_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// expected-no-diagnostics
int foo(void);
#pragma omp declare variant(foo) match(xxx={}, yyy={ccc})
#pragma omp declare variant(foo) match(xxx={vvv})
#pragma omp declare variant(foo) match(implementation={vendor(llvm)})
#pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx})
#pragma omp declare variant(foo) match(implementation={vendor(unknown)})
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)})
int bar(void);
// CHECK: int foo();
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5):ibm, xxx)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0):unknown)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)})
// CHECK-NEXT: int bar();
|
pzgstrf2.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief Performs panel LU factorization.
*
* <pre>
* -- Distributed SuperLU routine (version 5.2) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* August 15, 2014
*
* Modified:
* September 30, 2017
*
* <pre>
* Purpose
* =======
* Panel factorization -- block column k
*
* Factor diagonal and subdiagonal blocks and test for exact singularity.
* Only the column processes that own block column *k* participate
* in the work.
*
* Arguments
* =========
* options (input) superlu_dist_options_t* (global)
* The structure defines the input parameters to control
* how the LU decomposition will be performed.
*
* k0 (input) int (global)
* Counter of the next supernode to be factorized.
*
* k (input) int (global)
* The column number of the block column to be factorized.
*
* thresh (input) double (global)
* The threshold value = s_eps * anorm.
*
* Glu_persist (input) Glu_persist_t*
* Global data structures (xsup, supno) replicated on all processes.
*
* grid (input) gridinfo_t*
* The 2D process mesh.
*
* Llu (input/output) LocalLU_t*
* Local data structures to store distributed L and U matrices.
*
* U_diag_blk_send_req (input/output) MPI_Request*
* List of send requests to send down the diagonal block of U.
*
* tag_ub (input) int
* Upper bound of MPI tag values.
*
* stat (output) SuperLUStat_t*
* Record the statistics about the factorization.
* See SuperLUStat_t structure defined in util.h.
*
* info (output) int*
* = 0: successful exit
* < 0: if info = -i, the i-th argument had an illegal value
* > 0: if info = i, U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* and division by zero will occur if it is used to solve a
* system of equations.
* </pre>
*/
#include <math.h>
#include "superlu_zdefs.h"
/* This pdgstrf2 is based on TRSM function */
/* Panel LU factorization of block column k (complex, TRSM-based).
 * Only processes in the column owning block k participate. The process that
 * owns the diagonal block factors it (with optional tiny-pivot replacement),
 * broadcasts it down the process column, and applies ZTRSM to its own
 * subdiagonal rows; the other row processes receive the diagonal block and
 * apply ZTRSM to their local rows. On singularity, *info is set to the 1-based
 * global column index of the zero pivot. See the file header for the full
 * argument documentation. */
void
pzgstrf2_trsm
(superlu_dist_options_t * options, int_t k0, int_t k, double thresh,
Glu_persist_t * Glu_persist, gridinfo_t * grid, LocalLU_t * Llu,
MPI_Request * U_diag_blk_send_req, int tag_ub,
SuperLUStat_t * stat, int *info)
{
/* printf("entering pzgstrf2 %d \n", grid->iam); */
int cols_left, iam, l, pkk, pr;
int incx = 1, incy = 1;
int nsupr; /* number of rows in the block (LDA) */
int nsupc; /* number of columns in the block */
int luptr;
int_t i, myrow, krow, j, jfst, jlst, u_diag_cnt;
int_t *xsup = Glu_persist->xsup;
doublecomplex *lusup, temp;
doublecomplex *ujrow, *ublk_ptr; /* pointer to the U block */
doublecomplex one = {1.0, 0.0}, alpha = {-1.0, 0.0};
int_t Pr;
MPI_Status status;
MPI_Comm comm = (grid->cscp).comm;
double t1, t2;
/* Initialization. */
iam = grid->iam;
Pr = grid->nprow;
myrow = MYROW (iam, grid);
krow = PROW (k, grid);
pkk = PNUM (PROW (k, grid), PCOL (k, grid), grid);
j = LBj (k, grid); /* Local block number */
jfst = FstBlockC (k);
jlst = FstBlockC (k + 1);
lusup = Llu->Lnzval_bc_ptr[j];
nsupc = SuperSize (k);
if (Llu->Lrowind_bc_ptr[j])
nsupr = Llu->Lrowind_bc_ptr[j][1];
else
nsupr = 0;
#ifdef PI_DEBUG
printf ("rank %d Iter %d k=%d \t ztrsm nsuper %d \n",
iam, k0, k, nsupr);
#endif
ublk_ptr = ujrow = Llu->ujrow;
luptr = 0; /* Point to the diagonal entries. */
cols_left = nsupc; /* supernode size */
int ld_ujrow = nsupc; /* leading dimension of ujrow */
u_diag_cnt = 0;
incy = ld_ujrow;
/* Drain any Isends of a previous diagonal block before reusing the
 * send buffer and request slots. */
if ( U_diag_blk_send_req &&
U_diag_blk_send_req[myrow] != MPI_REQUEST_NULL ) {
/* There are pending sends - wait for all Isend to complete */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
for (pr = 0; pr < Pr; ++pr) {
if (pr != myrow) {
MPI_Wait (U_diag_blk_send_req + pr, &status);
}
}
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
stat->utime[COMM_DIAG] += t2;
#endif
/* flag no more outstanding send request. */
U_diag_blk_send_req[myrow] = MPI_REQUEST_NULL;
}
if (iam == pkk) { /* diagonal process */
/* ++++ First step compute diagonal block ++++++++++ */
/* Right-looking unblocked LU of the nsupc x nsupc diagonal block:
 * for each column j, optionally fix a tiny pivot, copy row j of U,
 * scale column j of L, then rank-1 update the trailing block. */
for (j = 0; j < jlst - jfst; ++j) { /* for each column in panel */
/* Diagonal pivot */
i = luptr;
if ( options->ReplaceTinyPivot == YES ) {
/* NOTE(review): the && requires BOTH components nonzero; a tiny
 * pivot like (1e-30, 0.0) is therefore never replaced. Confirm
 * whether || (i.e. "not exactly zero") was intended. */
if ( slud_z_abs1(&lusup[i]) < thresh &&
lusup[i].r != 0.0 && lusup[i].i != 0.0 ) { /* Diagonal */
#if ( PRNTlevel>=2 )
/* NOTE(review): passes a doublecomplex to %e (debug-only). */
printf ("(%d) .. col %d, tiny pivot %e ",
iam, jfst + j, lusup[i]);
#endif
/* Keep the new diagonal entry with the same sign. */
if ( lusup[i].r < 0 ) lusup[i].r = -thresh;
else lusup[i].r = thresh;
lusup[i].i = 0.0;
#if ( PRNTlevel>=2 )
printf ("replaced by %e\n", lusup[i]);
#endif
++(stat->TinyPivots);
}
}
#if 0
for (l = 0; l < cols_left; ++l, i += nsupr, ++u_diag_cnt)
ublk_ptr[u_diag_cnt] = lusup[i]; /* copy one row of U */
#endif
/* storing U in full form */
int st;
for (l = 0; l < cols_left; ++l, i += nsupr, ++u_diag_cnt) {
st = j * ld_ujrow + j;
ublk_ptr[st + l * ld_ujrow] = lusup[i]; /* copy one row of U */
}
/* Test for singularity. */
if ( ujrow[0].r == 0.0 && ujrow[0].i == 0.0 ) {
*info = j + jfst + 1;
} else { /* Scale the j-th column within diag. block. */
slud_z_div(&temp, &one, &ujrow[0]);
for (i = luptr + 1; i < luptr - j + nsupc; ++i)
zz_mult(&lusup[i], &lusup[i], &temp);
stat->ops[FACT] += 6*(nsupc-j-1) + 10;
}
/* Rank-1 update of the trailing submatrix within diag. block. */
if (--cols_left) {
/* l = nsupr - j - 1; */
l = nsupc - j - 1; /* Piyush */
zgeru_(&l, &cols_left, &alpha, &lusup[luptr+1], &incx,
&ujrow[ld_ujrow], &incy, &lusup[luptr + nsupr + 1],
&nsupr);
stat->ops[FACT] += 8 * l * cols_left;
}
/* ujrow = ublk_ptr + u_diag_cnt; */
ujrow = ujrow + ld_ujrow + 1; /* move to next row of U */
luptr += nsupr + 1; /* move to next column */
} /* for column j ...  first loop */
/* ++++ Second step compute off-diagonal block with communication ++*/
ublk_ptr = ujrow = Llu->ujrow;
/* Broadcast the factored diagonal block down the process column so the
 * other row processes can do their panel ZTRSMs. */
if (U_diag_blk_send_req && iam == pkk) { /* Send the U block downward */
/** ALWAYS SEND TO ALL OTHERS - TO FIX **/
#if ( PROFlevel>=1 )
TIC (t1);
#endif
for (pr = 0; pr < Pr; ++pr) {
if (pr != krow) {
/* tag = ((k0<<2)+2) % tag_ub; */
/* tag = (4*(nsupers+k0)+2) % tag_ub; */
MPI_Isend (ublk_ptr, nsupc * nsupc, SuperLU_MPI_DOUBLE_COMPLEX, pr,
SLU_MPI_TAG (4, k0) /* tag */ ,
comm, U_diag_blk_send_req + pr);
}
}
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
stat->utime[COMM_DIAG] += t2;
#endif
/* flag outstanding Isend */
/* NOTE(review): stores TRUE cast to MPI_Request as a sentinel in the
 * diagonal slot; only the != MPI_REQUEST_NULL test above relies on it. */
U_diag_blk_send_req[krow] = (MPI_Request) TRUE; /* Sherry */
}
/* pragma below would be changed by an MKL call */
/* Solve the l x nsupc subdiagonal part of this panel:
 * L(below diag) := L(below diag) * U(diag)^{-1}. */
l = nsupr - nsupc;
// n = nsupc;
doublecomplex alpha = {1.0, 0.0};
#ifdef PI_DEBUG
printf ("calling ztrsm\n");
printf ("ztrsm diagonal param 11: %d \n", nsupr);
#endif
#if defined (USE_VENDOR_BLAS)
ztrsm_ ("R", "U", "N", "N", &l, &nsupc,
&alpha, ublk_ptr, &ld_ujrow, &lusup[nsupc], &nsupr,
1, 1, 1, 1);
#else
ztrsm_ ("R", "U", "N", "N", &l, &nsupc,
&alpha, ublk_ptr, &ld_ujrow, &lusup[nsupc], &nsupr);
#endif
stat->ops[FACT] += 4.0 * ((flops_t) nsupc * (nsupc+1) * l);
} else { /* non-diagonal process */
/* ================================================================== *
 * Receive the diagonal block of U for panel factorization of L(:,k). *
 * Note: we block for panel factorization of L(:,k), but panel        *
 * factorization of U(:,k) do not block                               *
 * ================================================================== */
/* tag = ((k0<<2)+2) % tag_ub; */
/* tag = (4*(nsupers+k0)+2) % tag_ub; */
// printf("hello message receiving%d %d\n",(nsupc*(nsupc+1))>>1,SLU_MPI_TAG(4,k0));
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Recv (ublk_ptr, (nsupc * nsupc), SuperLU_MPI_DOUBLE_COMPLEX, krow,
SLU_MPI_TAG (4, k0) /* tag */ ,
comm, &status);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
stat->utime[COMM_DIAG] += t2;
#endif
/* Apply the received diagonal factor to this process's local rows. */
if (nsupr > 0) {
doublecomplex alpha = {1.0, 0.0};
#ifdef PI_DEBUG
printf ("ztrsm non diagonal param 11: %d \n", nsupr);
if (!lusup)
printf (" Rank :%d \t Empty block column occurred :\n", iam);
#endif
#if defined (USE_VENDOR_BLAS)
ztrsm_ ("R", "U", "N", "N", &nsupr, &nsupc,
&alpha, ublk_ptr, &ld_ujrow, lusup, &nsupr, 1, 1, 1, 1);
#else
ztrsm_ ("R", "U", "N", "N", &nsupr, &nsupc,
&alpha, ublk_ptr, &ld_ujrow, lusup, &nsupr);
#endif
stat->ops[FACT] += 4.0 * ((flops_t) nsupc * (nsupc+1) * nsupr);
}
} /* end if pkk ... */
/* printf("exiting pzgstrf2 %d \n", grid->iam); */
} /* PZGSTRF2_trsm */
/************************************************************************/
/* Triangular solves for block row U(k,:): for every nonzero segment of every
 * U block in row k, solve with the unit-lower-triangular diagonal block of
 * L(:,k) (ZTRSV). Block offsets are precomputed by the master thread, then the
 * blocks are processed with an OpenMP parallel-for; long segments are further
 * offloaded as OpenMP tasks. */
void pzgstrs2_omp
/************************************************************************/
(int_t k0, int_t k, Glu_persist_t * Glu_persist,
gridinfo_t * grid, LocalLU_t * Llu, SuperLUStat_t * stat)
{
#ifdef PI_DEBUG
printf("====Entering pzgstrs2==== \n");
#endif
int iam, pkk;
int incx = 1;
int nsupr; /* number of rows in the block L(:,k) (LDA) */
int segsize;
int nsupc; /* number of columns in the block */
int_t luptr, iukp, rukp;
int_t b, gb, j, klst, knsupc, lk, nb;
int_t *xsup = Glu_persist->xsup;
int_t *usub;
doublecomplex *lusup, *uval;
#if 0
//#ifdef USE_VTUNE
__SSC_MARK(0x111);// start SDE tracing, note uses 2 underscores
__itt_resume(); // start VTune, again use 2 underscores
#endif
/* Quick return. */
lk = LBi (k, grid); /* Local block number */
if (!Llu->Unzval_br_ptr[lk]) return;
/* Initialization. */
iam = grid->iam;
pkk = PNUM (PROW (k, grid), PCOL (k, grid), grid);
//int k_row_cycle = k / grid->nprow; /* for which cycle k exist (to assign rowwise thread blocking) */
//int gb_col_cycle; /* cycle through block columns */
klst = FstBlockC (k + 1);
knsupc = SuperSize (k);
usub = Llu->Ufstnz_br_ptr[lk]; /* index[] of block row U(k,:) */
uval = Llu->Unzval_br_ptr[lk];
/* The diagonal process reads L(:,k) from its own storage; other processes
 * use the look-ahead receive buffers filled by the factorization driver. */
if (iam == pkk) {
lk = LBj (k, grid);
nsupr = Llu->Lrowind_bc_ptr[lk][1]; /* LDA of lusup[] */
lusup = Llu->Lnzval_bc_ptr[lk];
} else {
nsupr = Llu->Lsub_buf_2[k0 % (1 + stat->num_look_aheads)][1]; /* LDA of lusup[] */
lusup = Llu->Lval_buf_2[k0 % (1 + stat->num_look_aheads)];
}
/////////////////////new-test//////////////////////////
/* !! Taken from Carl/SuperLU_DIST_5.1.0/EXAMPLE/pdgstrf2_v3.c !! */
/* Master thread: set up pointers to each block in the row */
nb = usub[0];
iukp = BR_HEADER;
rukp = 0;
/* One allocation holds three int arrays of length nb:
 * index offsets, value offsets, and supernode sizes per block.
 * NOTE(review): iukp/rukp are int_t but are stored into int slots -
 * confirm int_t cannot exceed INT_MAX here (large-matrix builds). */
int* blocks_index_pointers = SUPERLU_MALLOC (3 * nb * sizeof(int));
int* blocks_value_pointers = blocks_index_pointers + nb;
int* nsupc_temp = blocks_value_pointers + nb;
for (b = 0; b < nb; b++) { /* set up pointers to each block */
blocks_index_pointers[b] = iukp + UB_DESCRIPTOR;
blocks_value_pointers[b] = rukp;
gb = usub[iukp];
rukp += usub[iukp+1];
nsupc = SuperSize( gb );
nsupc_temp[b] = nsupc;
iukp += (UB_DESCRIPTOR + nsupc); /* move to the next block */
}
// Sherry: this version is more NUMA friendly compared to pdgstrf2_v2.c
// https://stackoverflow.com/questions/13065943/task-based-programming-pragma-omp-task-versus-pragma-omp-parallel-for
#pragma omp parallel for schedule(static) default(shared) \
private(b,j,iukp,rukp,segsize)
/* Loop through all the blocks in the row. */
for (b = 0; b < nb; ++b) {
iukp = blocks_index_pointers[b];
rukp = blocks_value_pointers[b];
/* Loop through all the segments in the block. */
for (j = 0; j < nsupc_temp[b]; j++) {
segsize = klst - usub[iukp++];
if (segsize) {
/* Only segments longer than 30 are worth the task overhead;
 * rukp is firstprivate so the task sees this segment's offset. */
#pragma omp task default(shared) firstprivate(segsize,rukp) if (segsize > 30)
{ /* Nonzero segment. */
int_t luptr = (knsupc - segsize) * (nsupr + 1);
//printf("[2] segsize %d, nsupr %d\n", segsize, nsupr);
#if defined (USE_VENDOR_BLAS)
ztrsv_ ("L", "N", "U", &segsize, &lusup[luptr], &nsupr,
&uval[rukp], &incx, 1, 1, 1);
#else
ztrsv_ ("L", "N", "U", &segsize, &lusup[luptr], &nsupr,
&uval[rukp], &incx);
#endif
} /* end task */
rukp += segsize;
stat->ops[FACT] += segsize * (segsize + 1);
} /* end if segsize > 0 */
} /* end for j in parallel ... */
/* #pragma omp taskwait */
} /* end for b ... */
/* Deallocate memory */
SUPERLU_FREE(blocks_index_pointers);
#if 0
//#ifdef USE_VTUNE
__itt_pause(); // stop VTune
__SSC_MARK(0x222); // stop SDE tracing
#endif
} /* PZGSTRS2_omp */
|
openmp_no_error.c | #include <stdio.h>
#include <omp.h>
int main () {
    // Each thread increments the shared counter inside a critical section,
    // so the final value equals the number of threads in the parallel region.
    int counter = 0;
#pragma omp parallel
    {
#pragma omp critical
        {
            ++counter;
        }
    }
    printf("Ausgabe: %d\n", counter);
    return counter;
}
|
reduce-kernels.h | #ifndef __REDUCE_KERNELS_FUNCTIONAL_CORE_H__
#define __REDUCE_KERNELS_FUNCTIONAL_CORE_H__
#include <omp.h>
#include "../../../macros/macros.h"
#include "../../../types/types.h"
#include "../../../meta/meta.h"
namespace __core__ {
namespace __functional__ {
namespace __reduce__ {
namespace __array__ {
namespace __private__ {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
// Reduce arr[0..size) with fn_T::fn, with no identity element available:
// each thread seeds its accumulator from the first element it visits.
// FIX: a thread that received zero loop iterations (size < threadnum) kept
// threadid == -1 and wrote result_mem[-1] (out-of-bounds). Idle threads now
// skip the store, and the final fold only combines slots actually written.
// NOTE: as in the original, `result` is left untouched when size == 0.
template <typename fn_T,int threadnum,typename RT,typename T,typename IT> __optimize__
RT __reduce_ckernel__(RT& result,CRESTRICT_Q(T*) arr,IT size) {
	RT result_mem[threadnum];
	bool valid_mem[threadnum];
	if(threadnum>1) {
		for(int t=0;t<threadnum;++t)
			valid_mem[t]=false;
#pragma omp parallel num_threads(threadnum) shared(result_mem,valid_mem,result)
		{
			int threadid=-1;
			RT result_local;
#pragma omp for
			for(IT i=0;i<size;++i) {
				if(threadid==-1) {
					threadid=omp_get_thread_num();
					result_local=arr[i];
				}
				else
					result_local=fn_T::fn(arr[i],result_local);
			}
			if(threadid!=-1) { // only threads that saw at least one element contribute
				result_mem[threadid]=result_local;
				valid_mem[threadid]=true;
			}
			int thread_count=omp_get_num_threads();
#pragma omp barrier
#pragma omp single
			{
				bool seeded=false;
				for(int i=0;i<thread_count;++i) {
					if(!valid_mem[i])
						continue;
					if(!seeded) {
						result=result_mem[i];
						seeded=true;
					}
					else
						result=fn_T::fn(result_mem[i],result);
				}
			}
		}
	}
	else {
		for(IT i=0;i<size;++i) {
			if(i==0)
				result=arr[0];
			else
				result=fn_T::fn(arr[i],result);
		}
	}
	return result;
}
// Reduce arr[0..size) with fn_T::fn, seeding every accumulator with the
// compile-time identity IV::value (safe even for threads with no iterations).
template <typename fn_T,typename IV,int threadnum,typename RT,typename T,typename IT> __optimize__
RT __reduce_ckernel__(RT& result,CRESTRICT_Q(T*) arr,IT size) {
	RT partials[threadnum];
	if(threadnum<=1) {
		result=IV::value;
		for(IT idx=0;idx<size;++idx)
			result=fn_T::fn(arr[idx],result);
		return result;
	}
	result=IV::value;
#pragma omp parallel num_threads(threadnum) shared(partials,result)
	{
		const int tid=omp_get_thread_num();
		RT acc=IV::value;
#pragma omp for
		for(IT idx=0;idx<size;++idx)
			acc=fn_T::fn(arr[idx],acc);
		partials[tid]=acc;
		const int active=omp_get_num_threads();
#pragma omp barrier
#pragma omp single
		{
			// fold every per-thread partial into result (already IV::value)
			for(int t=0;t<active;++t)
				result=fn_T::fn(partials[t],result);
		}
	}
	return result;
}
// Reduce arr[0..size) with fn_T::fn, seeding every accumulator with the
// caller-supplied initial value ival.
template <typename fn_T,int threadnum,typename RT,typename T,typename IT> __optimize__
RT __reduce_ckernel__(RT& result,CRESTRICT_Q(T*) arr,IT size,const RT &ival) {
	RT partials[threadnum];
	if(threadnum<=1) {
		result=ival;
		for(IT idx=0;idx<size;++idx)
			result=fn_T::fn(arr[idx],result);
		return result;
	}
#pragma omp parallel num_threads(threadnum) shared(partials,result)
	{
		const int tid=omp_get_thread_num();
		RT acc=ival;
#pragma omp for
		for(IT idx=0;idx<size;++idx)
			acc=fn_T::fn(arr[idx],acc);
		partials[tid]=acc;
		const int active=omp_get_num_threads();
#pragma omp barrier
#pragma omp single
		{
			// seed from slot 0, then fold in the remaining partials
			result=partials[0];
			for(int t=1;t<active;++t)
				result=fn_T::fn(partials[t],result);
		}
	}
	return result;
}
// Map apply_FT::fn over arr[0..size) and reduce with reduce_FT::fn, with no
// identity element: each thread seeds from its first transformed element.
// FIX: a thread that received zero loop iterations (size < threadnum) kept
// threadid == -1 and wrote result_mem[-1] (out-of-bounds). Idle threads now
// skip the store, and the final fold only combines slots actually written.
// NOTE: as in the original, `result` is left untouched when size == 0.
template <typename reduce_FT,typename apply_FT,int threadnum,typename RT,typename T,typename IT> __optimize__
RT __reduce_apply_ckernel__(RT& result,CRESTRICT_Q(T*) arr,IT size) {
	RT result_mem[threadnum];
	bool valid_mem[threadnum];
	if(threadnum>1) {
		for(int t=0;t<threadnum;++t)
			valid_mem[t]=false;
#pragma omp parallel num_threads(threadnum) shared(result_mem,valid_mem,result)
		{
			int threadid=-1;
			RT result_local;
#pragma omp for
			for(IT i=0;i<size;++i) {
				if(threadid==-1) {
					threadid=omp_get_thread_num();
					result_local=apply_FT::fn(arr[i]);
				}
				else
					result_local=reduce_FT::fn(apply_FT::fn(arr[i]),result_local);
			}
			if(threadid!=-1) { // only threads that saw at least one element contribute
				result_mem[threadid]=result_local;
				valid_mem[threadid]=true;
			}
			int thread_count=omp_get_num_threads();
#pragma omp barrier
#pragma omp single
			{
				bool seeded=false;
				for(int i=0;i<thread_count;++i) {
					if(!valid_mem[i])
						continue;
					if(!seeded) {
						result=result_mem[i];
						seeded=true;
					}
					else
						result=reduce_FT::fn(result_mem[i],result);
				}
			}
		}
	}
	else {
		for(IT i=0;i<size;++i) {
			if(i==0)
				result=apply_FT::fn(arr[0]);
			else
				result=reduce_FT::fn(apply_FT::fn(arr[i]),result);
		}
	}
	return result;
}
/* Reduce-apply with a compile-time identity IV::value:
 *   result = reduce_FT( apply_FT(arr[i]) ... ) folded onto IV::value.
 * Every thread seeds its accumulator with the identity, so all partial slots
 * are always written and the final combine can walk them unconditionally. */
template <typename reduce_FT,typename apply_FT,typename IV,int threadnum,typename RT,typename T,typename IT> __optimize__
RT __reduce_apply_ckernel__(RT& result, CRESTRICT_Q(T*) arr, IT size)
{
    RT partial[threadnum];
    if (threadnum > 1) {
#pragma omp parallel num_threads(threadnum) shared(partial,result)
        {
            const int tid = omp_get_thread_num();
            RT acc = IV::value;
#pragma omp for
            for (IT k = 0; k < size; ++k) {
                acc = reduce_FT::fn(apply_FT::fn(arr[k]), acc);
            }
            partial[tid] = acc;
            const int nthreads = omp_get_num_threads();
#pragma omp barrier
#pragma omp single
            {
                result = partial[0];
                for (int t = 1; t < nthreads; ++t) {
                    result = reduce_FT::fn(partial[t], result);
                }
            }
        }
    }
    else {
        result = IV::value;
        for (IT k = 0; k < size; ++k) {
            result = reduce_FT::fn(apply_FT::fn(arr[k]), result);
        }
    }
    return result;
}
/* Reduce-apply seeded with a runtime identity value ival:
 *   result = reduce_FT( apply_FT(arr[i]) ... ) folded onto ival.
 * Identical structure to the IV variant, but the identity is passed in. */
template <typename reduce_FT,typename apply_FT,int threadnum,typename RT,typename T,typename IT> __optimize__
RT __reduce_apply_ckernel__(RT& result, CRESTRICT_Q(T*) arr, IT size, const RT &ival)
{
    RT partial[threadnum];
    if (threadnum > 1) {
#pragma omp parallel num_threads(threadnum) shared(partial,result)
        {
            const int tid = omp_get_thread_num();
            RT acc = ival;
#pragma omp for
            for (IT k = 0; k < size; ++k) {
                acc = reduce_FT::fn(apply_FT::fn(arr[k]), acc);
            }
            partial[tid] = acc;
            const int nthreads = omp_get_num_threads();
#pragma omp barrier
#pragma omp single
            {
                result = partial[0];
                for (int t = 1; t < nthreads; ++t) {
                    result = reduce_FT::fn(partial[t], result);
                }
            }
        }
    }
    else {
        result = ival;
        for (IT k = 0; k < size; ++k) {
            result = reduce_FT::fn(apply_FT::fn(arr[k]), result);
        }
    }
    return result;
}
/* Two-array reduce-apply without an identity value:
 *   result = reduce_FT( apply_FT(arr1[i], arr2[i]) ... )
 * The first element a thread sees seeds its private accumulator.
 *
 * BUG FIXES:
 *  1. A thread whose chunk was empty (size < threadnum) kept threadid == -1
 *     and wrote result_mem[-1] (out-of-bounds, UB); the final combine then
 *     read uninitialized slots.  Threads now record whether they produced a
 *     partial and idle slots are skipped.
 *  2. The serial branch seeded with apply_FT::fn(arr1[0]) — dropping arr2 —
 *     which does not even match apply_FT's two-argument signature; it now
 *     uses apply_FT::fn(arr1[0], arr2[0]).
 * When size == 0, `result` is left untouched (matching the serial branch). */
template <typename reduce_FT,typename apply_FT,int threadnum,typename RT,typename V,typename U,typename IT> __optimize__
RT __reduce_apply_ckernel__(RT& result, CRESTRICT_Q(V*) arr1, CRESTRICT_Q(U*) arr2, IT size)
{
    RT result_mem[threadnum];
    int have_value[threadnum];      /* 1 iff that thread stored a partial */
    if (threadnum > 1) {
        for (int t = 0; t < threadnum; ++t) { have_value[t] = 0; }
#pragma omp parallel num_threads(threadnum) shared(result_mem,result,have_value)
        {
            int threadid = -1;      /* -1 until this thread sees its first element */
            RT result_local;
#pragma omp for
            for (IT i = 0; i < size; ++i) {
                if (threadid == -1) {
                    threadid = omp_get_thread_num();
                    result_local = apply_FT::fn(arr1[i], arr2[i]);
                }
                else {
                    result_local = reduce_FT::fn(apply_FT::fn(arr1[i], arr2[i]), result_local);
                }
            }
            if (threadid != -1) {
                result_mem[threadid] = result_local;
                have_value[threadid] = 1;
            }
            int thread_count = omp_get_num_threads();
#pragma omp barrier
#pragma omp single
            {
                int first = 1;
                for (int i = 0; i < thread_count; ++i) {
                    if (!have_value[i]) { continue; }   /* idle thread: no slot */
                    if (first) { result = result_mem[i]; first = 0; }
                    else       { result = reduce_FT::fn(result_mem[i], result); }
                }
            }
        }
    }
    else {
        for (IT i = 0; i < size; ++i) {
            if (i == 0)
                result = apply_FT::fn(arr1[0], arr2[0]);
            else
                result = reduce_FT::fn(apply_FT::fn(arr1[i], arr2[i]), result);
        }
    }
    return result;
}
/* Two-array reduce-apply with compile-time identity IV::value:
 *   result = reduce_FT( apply_FT(arr1[i], arr2[i]) ... ) folded onto IV::value.
 * Every thread seeds with the identity, so all partial slots are written. */
template <typename reduce_FT,typename apply_FT,typename IV,int threadnum,typename RT,typename V,typename U,typename IT> __optimize__
RT __reduce_apply_ckernel__(RT& result, CRESTRICT_Q(V*) arr1, CRESTRICT_Q(U*) arr2, IT size)
{
    RT partial[threadnum];
    if (threadnum > 1) {
#pragma omp parallel num_threads(threadnum) shared(partial,result)
        {
            const int tid = omp_get_thread_num();
            RT acc = IV::value;
#pragma omp for
            for (IT k = 0; k < size; ++k) {
                acc = reduce_FT::fn(apply_FT::fn(arr1[k], arr2[k]), acc);
            }
            partial[tid] = acc;
            const int nthreads = omp_get_num_threads();
#pragma omp barrier
#pragma omp single
            {
                result = partial[0];
                for (int t = 1; t < nthreads; ++t) {
                    result = reduce_FT::fn(partial[t], result);
                }
            }
        }
    }
    else {
        result = IV::value;
        for (IT k = 0; k < size; ++k) {
            result = reduce_FT::fn(apply_FT::fn(arr1[k], arr2[k]), result);
        }
    }
    return result;
}
/* Two-array reduce-apply seeded with a runtime identity value ival:
 *   result = reduce_FT( apply_FT(arr1[i], arr2[i]) ... ) folded onto ival. */
template <typename reduce_FT,typename apply_FT,int threadnum,typename RT,typename V,typename U,typename IT> __optimize__
RT __reduce_apply_ckernel__(RT& result, CRESTRICT_Q(V*) arr1, CRESTRICT_Q(U*) arr2, IT size, const RT &ival)
{
    RT partial[threadnum];
    if (threadnum > 1) {
#pragma omp parallel num_threads(threadnum) shared(partial,result)
        {
            const int tid = omp_get_thread_num();
            RT acc = ival;
#pragma omp for
            for (IT k = 0; k < size; ++k) {
                acc = reduce_FT::fn(apply_FT::fn(arr1[k], arr2[k]), acc);
            }
            partial[tid] = acc;
            const int nthreads = omp_get_num_threads();
#pragma omp barrier
#pragma omp single
            {
                result = partial[0];
                for (int t = 1; t < nthreads; ++t) {
                    result = reduce_FT::fn(partial[t], result);
                }
            }
        }
    }
    else {
        result = ival;
        for (IT k = 0; k < size; ++k) {
            result = reduce_FT::fn(apply_FT::fn(arr1[k], arr2[k]), result);
        }
    }
    return result;
}
#pragma GCC diagnostic pop
}
}
}
}
}
#endif
|
par_nongalerkin.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "../HYPRE.h"
/* This file contains the routines for constructing non-Galerkin coarse grid
* operators, based on the original Galerkin coarse grid
*/
/* Take all of the indices from indices[start, start+1, start+2, ..., end]
* and take the corresponding entries in array and place them in-order in output.
* Assumptions:
* output is of length end-start+1
* indices never contains an index that goes out of bounds in array
* */
/* Gather the entries of `array` selected by indices[start..end] into
 * `output`, preserving order.  `output` must hold (end - start + 1) entries
 * and every selected index must be in bounds for `array`.  Always returns 0. */
HYPRE_Int
hypre_GrabSubArray(HYPRE_Int * indices,
                   HYPRE_Int start,
                   HYPRE_Int end,
                   HYPRE_BigInt * array,
                   HYPRE_BigInt * output)
{
   HYPRE_Int k;
   HYPRE_Int count = end - start + 1;

   for (k = 0; k < count; k++)
   {
      output[k] = array[ indices[start + k] ];
   }
   return 0;
}
/* Compute the intersection of x and y, placing
* the intersection in z. Additionally, the array
* x_data is associated with x, i.e., the entries
* that we grab from x, we also grab from x_data.
* If x[k] is placed in z[m], then x_data[k] goes to
* output_x_data[m].
*
* Assumptions:
* z is of length min(x_length, y_length)
* x and y are sorted
* x_length and y_length are similar in size, otherwise,
* looping over the smaller array and doing binary search
* in the longer array is faster.
* */
/* Standard sorted-merge intersection of x[0..x_length) and y[0..y_length).
 * Matching values go into z, and the x_data entry paired with each matching
 * x value goes into output_x_data at the same position.  On return,
 * *intersect_length holds the number of matches.  Both inputs must be
 * sorted; z must hold at least min(x_length, y_length) entries.
 * Always returns 1. */
HYPRE_Int
hypre_IntersectTwoArrays(HYPRE_Int *x,
                         HYPRE_Real *x_data,
                         HYPRE_Int x_length,
                         HYPRE_Int *y,
                         HYPRE_Int y_length,
                         HYPRE_Int *z,
                         HYPRE_Real *output_x_data,
                         HYPRE_Int *intersect_length)
{
   HYPRE_Int ix = 0;
   HYPRE_Int iy = 0;
   HYPRE_Int nz = 0;

   /* Advance whichever cursor points at the smaller value; on a tie,
    * record the match and advance both. */
   while (ix < x_length && iy < y_length)
   {
      if (x[ix] < y[iy])
      {
         ix++;
      }
      else if (x[ix] > y[iy])
      {
         iy++;
      }
      else
      {
         z[nz] = x[ix];
         output_x_data[nz] = x_data[ix];
         nz++;
         ix++;
         iy++;
      }
   }
   *intersect_length = nz;
   return 1;
}
/* HYPRE_BigInt variant of hypre_IntersectTwoArrays: sorted-merge
 * intersection of x and y into z, copying the paired x_data entries into
 * output_x_data.  *intersect_length receives the match count.
 * Always returns 1. */
HYPRE_Int
hypre_IntersectTwoBigArrays(HYPRE_BigInt *x,
                            HYPRE_Real *x_data,
                            HYPRE_Int x_length,
                            HYPRE_BigInt *y,
                            HYPRE_Int y_length,
                            HYPRE_BigInt *z,
                            HYPRE_Real *output_x_data,
                            HYPRE_Int *intersect_length)
{
   HYPRE_Int ix = 0;
   HYPRE_Int iy = 0;
   HYPRE_Int nz = 0;

   /* Advance the cursor holding the smaller value; on a tie, record the
    * match and advance both. */
   while (ix < x_length && iy < y_length)
   {
      if (x[ix] < y[iy])
      {
         ix++;
      }
      else if (x[ix] > y[iy])
      {
         iy++;
      }
      else
      {
         z[nz] = x[ix];
         output_x_data[nz] = x_data[ix];
         nz++;
         ix++;
         iy++;
      }
   }
   *intersect_length = nz;
   return 1;
}
/* Copy CSR matrix A to CSR matrix B. The column indices are
* assumed to be sorted, and the sparsity pattern of B is a subset
* of the sparsity pattern of A.
*
* Assumptions:
* Column indices of A and B are sorted
* Sparsity pattern of B is a subset of A's
* A and B are the same size and have same data layout
**/
HYPRE_Int
hypre_SortedCopyParCSRData(hypre_ParCSRMatrix *A,
                           hypre_ParCSRMatrix *B)
{
   /* Grab off A and B's data structures */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Real *B_diag_data = hypre_CSRMatrixData(B_diag);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Real *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
   /* Scratch array reused across rows; grown on demand below. */
   HYPRE_Int *temp_int_array = NULL;
   HYPRE_Int temp_int_array_length=0;
   HYPRE_Int i, length, offset_A, offset_B;

   for(i = 0; i < num_variables; i++)
   {
      /* Deal with the first row entries, which may be diagonal elements.
       * offset_A/offset_B == 1 means the first stored entry of row i is the
       * diagonal, and the intersection below skips past it.
       * NOTE(review): assumes every diag row is nonempty — verify callers. */
      if( A_diag_j[A_diag_i[i]] == i)
      { offset_A = 1; }
      else
      { offset_A = 0; }
      if( B_diag_j[B_diag_i[i]] == i)
      { offset_B = 1; }
      else
      { offset_B = 0; }
      /* Copy the diagonal value directly when both matrices store it first. */
      if( (offset_B == 1) && (offset_A == 1) )
      { B_diag_data[B_diag_i[i]] = A_diag_data[A_diag_i[i]]; }
      /* This finds the intersection of the column indices, and
       * also copies the matching data in A to the data array in B
       **/
      /* Grow the scratch buffer if this diag row is wider than any seen so far. */
      if( (A_diag_i[i+1] - A_diag_i[i] - offset_A) > temp_int_array_length )
      {
         hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
         temp_int_array_length = (A_diag_i[i+1] - A_diag_i[i] - offset_A);
         temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
      }
      /* Intersect row i of A_diag with row i of B_diag (both past any leading
       * diagonal); matching A values land directly in B's data array.
       * The intersected indices (temp_int_array) and `length` are discarded. */
      hypre_IntersectTwoArrays(&(A_diag_j[A_diag_i[i] + offset_A]),
                               &(A_diag_data[A_diag_i[i] + offset_A]),
                               A_diag_i[i+1] - A_diag_i[i] - offset_A,
                               &(B_diag_j[B_diag_i[i] + offset_B]),
                               B_diag_i[i+1] - B_diag_i[i] - offset_B,
                               temp_int_array,
                               &(B_diag_data[B_diag_i[i] + offset_B]),
                               &length);
      /* Grow the scratch buffer for the offd row if needed. */
      if( (A_offd_i[i+1] - A_offd_i[i]) > temp_int_array_length )
      {
         hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
         temp_int_array_length = (A_offd_i[i+1] - A_offd_i[i]);
         temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
      }
      /* Same intersection-copy for the off-diagonal parts (no diagonal entry
       * to skip in offd). */
      hypre_IntersectTwoArrays(&(A_offd_j[A_offd_i[i]]),
                               &(A_offd_data[A_offd_i[i]]),
                               A_offd_i[i+1] - A_offd_i[i],
                               &(B_offd_j[B_offd_i[i]]),
                               B_offd_i[i+1] - B_offd_i[i],
                               temp_int_array,
                               &(B_offd_data[B_offd_i[i]]),
                               &length);
   }

   if(temp_int_array)
   { hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); }
   return 1;
}
/*
* Equivalent to hypre_BoomerAMGCreateS, except, the data array of S
* is not Null and contains the data entries from A.
*/
HYPRE_Int
hypre_BoomerAMG_MyCreateS(hypre_ParCSRMatrix *A,
                          HYPRE_Real strength_threshold,
                          HYPRE_Real max_row_sum,
                          HYPRE_Int num_functions,
                          HYPRE_Int *dof_func,
                          hypre_ParCSRMatrix **S_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = NULL;
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int num_nonzeros_diag;
   HYPRE_Int num_nonzeros_offd = 0;
   HYPRE_Int num_cols_offd = 0;
   hypre_ParCSRMatrix *S;
   hypre_CSRMatrix *S_diag;
   HYPRE_Int *S_diag_i;
   HYPRE_Int *S_diag_j;
   HYPRE_Real *S_diag_data;
   hypre_CSRMatrix *S_offd;
   HYPRE_Int *S_offd_i = NULL;
   HYPRE_Int *S_offd_j = NULL;
   HYPRE_Real *S_offd_data;
   HYPRE_Real diag, row_scale, row_sum;
   HYPRE_Int i, jA, jS;
   HYPRE_Int ierr = 0;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *int_buf_data;
   HYPRE_Int index, start, j;
   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = aij, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/
   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];
   /* Initialize S: same shape and (initially) same nnz layout as A. */
   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   /* Unlike hypre_BoomerAMGCreateS, S carries a data array with A's values. */
   hypre_CSRMatrixData(S_diag) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_diag, HYPRE_MEMORY_HOST);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
   S_diag_i = hypre_CSRMatrixI(S_diag);
   S_diag_j = hypre_CSRMatrixJ(S_diag);
   S_diag_data = hypre_CSRMatrixData(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);
   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
   dof_func_offd = NULL;
   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixData(S_offd) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      S_offd_j = hypre_CSRMatrixJ(S_offd);
      S_offd_data = hypre_CSRMatrixData(S_offd);
      hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   }
   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      /* Pack the dof_func entries this rank must send, exchange them, and
       * receive the neighbors' values into dof_func_offd. */
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }
   /* give S same nonzero structure as A */
   hypre_ParCSRMatrixCopy(A,S,1);
   /* Strength pass: for each row, compute the extreme off-diagonal value
    * (row_scale) and the row sum, then mark weak connections by setting
    * their S column index to -1.  Rows are independent, hence the parallel
    * loop.  NOTE(review): diag read below assumes each diag row stores its
    * diagonal entry first — standard hypre layout, but verify upstream. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_variables; i++)
   {
      diag = A_diag_data[A_diag_i[i]];
      /* compute scaling factor and row sum */
      row_scale = 0.0;
      row_sum = diag;
      if (num_functions > 1)
      {
         /* Multi-function case: only couplings within the same function
          * (dof_func match) contribute to scale and sum. */
         if (diag < 0)
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func[A_diag_j[jA]])
               {
                  row_scale = hypre_max(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
               {
                  row_scale = hypre_max(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
         }
         else
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func[A_diag_j[jA]])
               {
                  row_scale = hypre_min(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
               {
                  row_scale = hypre_min(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
         }
      }
      else
      {
         /* Single-function case: all off-diagonal entries contribute.
          * max for negative diagonal, min for non-negative diagonal. */
         if (diag < 0)
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               row_scale = hypre_max(row_scale, A_diag_data[jA]);
               row_sum += A_diag_data[jA];
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               row_scale = hypre_max(row_scale, A_offd_data[jA]);
               row_sum += A_offd_data[jA];
            }
         }
         else
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               row_scale = hypre_min(row_scale, A_diag_data[jA]);
               row_sum += A_diag_data[jA];
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               row_scale = hypre_min(row_scale, A_offd_data[jA]);
               row_sum += A_offd_data[jA];
            }
         }
      }
      /* compute row entries of S */
      S_diag_j[A_diag_i[i]] = -1;
      if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
      {
         /* make all dependencies weak */
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            S_diag_j[jA] = -1;
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            S_offd_j[jA] = -1;
         }
      }
      else
      {
         /* Keep only strong connections; a connection is weak (marked -1)
          * when its value fails the strength_threshold * row_scale test or
          * (multi-function) couples different functions. */
         if (num_functions > 1)
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] <= strength_threshold * row_scale
                      || dof_func[i] != dof_func[A_diag_j[jA]])
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] <= strength_threshold * row_scale
                      || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] >= strength_threshold * row_scale
                      || dof_func[i] != dof_func[A_diag_j[jA]])
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] >= strength_threshold * row_scale
                      || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
         }
         else
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] <= strength_threshold * row_scale)
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] <= strength_threshold * row_scale)
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] >= strength_threshold * row_scale)
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] >= strength_threshold * row_scale)
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
         }
      }
   }
   /*--------------------------------------------------------------
    * "Compress" the strength matrix.
    *
    * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
    *
    * NOTE: This "compression" section of code may not be removed, the
    * non-Galerkin routine depends on it.
    *----------------------------------------------------------------*/
   /* RDF: not sure if able to thread this loop */
   /* Sequential in-place squeeze: drop entries marked -1, keeping the
    * surviving column indices AND their data values (jS trails jA). */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (S_diag_j[jA] > -1)
         {
            S_diag_j[jS] = S_diag_j[jA];
            S_diag_data[jS] = S_diag_data[jA];
            jS++;
         }
      }
   }
   S_diag_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_diag) = jS;
   /* RDF: not sure if able to thread this loop */
   /* Same squeeze for the off-diagonal part. */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (S_offd_j[jA] > -1)
         {
            S_offd_j[jS] = S_offd_j[jA];
            S_offd_data[jS] = S_offd_data[jA];
            jS++;
         }
      }
   }
   S_offd_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_offd) = jS;
   hypre_ParCSRMatrixCommPkg(S) = NULL;
   *S_ptr = S;
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   return (ierr);
}
/**
* Initialize the IJBuffer counters
**/
/* Reset the IJ write-buffer counters: empty buffer, one (empty) current row.
 * See hypre_NonGalerkinIJBufferWrite for the buffer layout.  Returns 0. */
HYPRE_Int
hypre_NonGalerkinIJBufferInit( HYPRE_Int *ijbuf_cnt,
                               HYPRE_Int *ijbuf_rowcounter,
                               HYPRE_Int *ijbuf_numcols )
{
   *ijbuf_cnt        = 0;
   *ijbuf_rowcounter = 1;   /* always points to the next row */
   ijbuf_numcols[0]  = 0;
   return 0;
}
/**
* Initialize the IJBuffer counters
**/
/* HYPRE_BigInt-numcols variant of hypre_NonGalerkinIJBufferInit: reset the
 * IJ write-buffer counters.  Returns 0. */
HYPRE_Int
hypre_NonGalerkinIJBigBufferInit( HYPRE_Int *ijbuf_cnt,
                                  HYPRE_Int *ijbuf_rowcounter,
                                  HYPRE_BigInt *ijbuf_numcols )
{
   *ijbuf_cnt        = 0;
   *ijbuf_rowcounter = 1;   /* always points to the next row */
   ijbuf_numcols[0]  = 0;
   return 0;
}
/**
* Update the buffer counters
**/
/* Start a new row in the IJ write buffer.  If the current (last) row is
 * still empty its slot is reused for new_row; otherwise a fresh row slot is
 * opened and the row counter advances.  Returns 0. */
HYPRE_Int
hypre_NonGalerkinIJBufferNewRow(HYPRE_BigInt *ijbuf_rownums,
                                HYPRE_Int *ijbuf_numcols,
                                HYPRE_Int *ijbuf_rowcounter,
                                HYPRE_BigInt new_row)
{
   HYPRE_Int last = (*ijbuf_rowcounter) - 1;

   if (ijbuf_numcols[last] == 0)
   {
      /* Previous row never received an entry: overwrite it in place. */
      ijbuf_rownums[last] = new_row;
   }
   else
   {
      /* Open the next row slot. */
      ijbuf_rownums[last + 1] = new_row;
      ijbuf_numcols[last + 1] = 0;
      (*ijbuf_rowcounter)++;
   }
   return 0;
}
/**
* Compress the current row in an IJ Buffer by removing duplicate entries
**/
/**
 * Compress the current row in an IJ Buffer by removing duplicate entries
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompressRow( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                      HYPRE_Int ijbuf_rowcounter,
                                      HYPRE_Real *ijbuf_data,
                                      HYPRE_BigInt *ijbuf_cols,
                                      HYPRE_BigInt *ijbuf_rownums,
                                      HYPRE_Int *ijbuf_numcols)
{
   HYPRE_Int ierr = 0;
   HYPRE_Int nentries, i, nduplicate;

   /* Compress the current row by removing any repeat entries,
    * making sure to decrement ijbuf_cnt by nduplicate */
   nentries = ijbuf_numcols[ ijbuf_rowcounter-1 ];
   nduplicate = 0;
   /* Sort this row's slice of the buffer by column index (data follows cols),
    * so duplicate columns become adjacent. */
   hypre_BigQsort1(ijbuf_cols, ijbuf_data, (*ijbuf_cnt)-nentries, (*ijbuf_cnt)-1 );

   /* Single left-to-right pass: duplicates accumulate into the surviving
    * entry (+=), and unique entries shift left by the number of duplicates
    * seen so far. */
   for(i =(*ijbuf_cnt)-nentries+1; i <= (*ijbuf_cnt)-1; i++)
   {
      if( ijbuf_cols[i] == ijbuf_cols[i-1] )
      {
         /* Shift duplicate entry down */
         nduplicate++;
         ijbuf_data[i - nduplicate] += ijbuf_data[i];
      }
      else if(nduplicate > 0)
      {
         ijbuf_data[i - nduplicate] = ijbuf_data[i];
         ijbuf_cols[i - nduplicate] = ijbuf_cols[i];
      }
   }
   /* Both the global entry count and this row's column count shrink by the
    * number of removed duplicates. */
   (*ijbuf_cnt) -= nduplicate;
   ijbuf_numcols[ ijbuf_rowcounter-1 ] -= nduplicate;
   return ierr;
}
/**
* Compress the entire buffer, removing duplicate rows
**/
/**
 * Compress the entire buffer, removing duplicate rows
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompress( HYPRE_Int     ijbuf_size,
                                   HYPRE_Int      *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                   HYPRE_Int      *ijbuf_rowcounter,
                                   HYPRE_Real     **ijbuf_data,
                                   HYPRE_BigInt   **ijbuf_cols,
                                   HYPRE_BigInt   **ijbuf_rownums,
                                   HYPRE_Int      **ijbuf_numcols)
{
   HYPRE_Int ierr = 0;
   HYPRE_Int *indys = hypre_CTAlloc(HYPRE_Int, (*ijbuf_rowcounter) , HYPRE_MEMORY_HOST);

   HYPRE_Int i, j, duplicate, cnt_new, rowcounter_new, prev_row;
   HYPRE_Int row_loc;
   HYPRE_BigInt row_start, row_stop, row;   /* note: hold numcols offsets, not global indices */

   HYPRE_Real *data_new;
   HYPRE_BigInt *cols_new;
   HYPRE_BigInt *rownums_new;
   HYPRE_Int *numcols_new;

   /* Do a sort on rownums, but store the original order in indys.
    * Then see if there are any duplicate rows */
   for(i = 0; i < (*ijbuf_rowcounter); i++)
   {   indys[i] = i; }
   hypre_BigQsortbi((*ijbuf_rownums), indys, 0, (*ijbuf_rowcounter)-1);
   duplicate = 0;
   /* If the permutation is not the identity shift, some row number appears
    * more than once (or out of order) and a rebuild is needed. */
   for(i = 1; i < (*ijbuf_rowcounter); i++)
   {
      if(indys[i] != (indys[i-1]+1))
      {
         duplicate = 1;
         break;
      }
   }

   /* Compress duplicate rows */
   if(duplicate)
   {
      /* Accumulate numcols, so that it functions like a CSR row-pointer */
      for(i = 1; i < (*ijbuf_rowcounter); i++)
      {   (*ijbuf_numcols)[i] += (*ijbuf_numcols)[i-1]; }

      /* Initialize new buffer */
      prev_row       = -1;
      rowcounter_new = 0;
      cnt_new        = 0;
      data_new       = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
      cols_new       = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      rownums_new    = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      numcols_new    = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
      numcols_new[0] = 0;

      /* Cycle through each row */
      for(i = 0; i < (*ijbuf_rowcounter); i++)
      {
         /* Find which row this is in local and global numberings, and where
          * this row's data starts and stops in the buffer*/
         row_loc = indys[i];        /* original (pre-sort) position of this row */
         row = (*ijbuf_rownums)[i];
         if(row_loc > 0)
         {
            row_start = (*ijbuf_numcols)[row_loc-1];
            row_stop  = (*ijbuf_numcols)[row_loc];
         }
         else
         {
            row_start = 0;
            row_stop  = (*ijbuf_numcols)[row_loc];
         }

         /* Is this a new row?  If so, compress previous row, and add a new
          * one.  Noting that prev_row = -1 is a special value */
         if(row != prev_row)
         {
            if(prev_row != -1)
            {
               /* Compress previous row */
               hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                                    cols_new, rownums_new, numcols_new);
            }
            prev_row = row;
            numcols_new[rowcounter_new] = 0;
            rownums_new[rowcounter_new] = row;
            rowcounter_new++;
         }

         /* Copy row into new buffer */
         for(j = row_start; j < row_stop; j++)
         {
            data_new[cnt_new] = (*ijbuf_data)[j];
            cols_new[cnt_new] = (*ijbuf_cols)[j];
            numcols_new[rowcounter_new-1]++;
            cnt_new++;
         }
      }

      /* Compress the final row */
      /* NOTE(review): i > 1 relies on duplicate implying rowcounter >= 2,
       * so at least one row was always appended above — confirm invariant. */
      if(i > 1)
      {
         hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                              cols_new, rownums_new, numcols_new);
      }

      *ijbuf_cnt = cnt_new;
      *ijbuf_rowcounter = rowcounter_new;

      /* Point to the new buffer */
      hypre_TFree(*ijbuf_data, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_cols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_rownums, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_numcols, HYPRE_MEMORY_DEVICE);
      (*ijbuf_data)    = data_new;
      (*ijbuf_cols)    = cols_new;
      (*ijbuf_rownums) = rownums_new;
      (*ijbuf_numcols) = numcols_new;
   }

   hypre_TFree(indys, HYPRE_MEMORY_HOST);

   return ierr;
}
/**
* Do a buffered write to an IJ matrix.
* That is, write to the buffer, until the buffer is full. Then when the
* buffer is full, write to the IJ matrix and reset the buffer counters
* In effect, this buffers this operation
* A[row_to_write, col_to_write] += val_to_write
**/
/**
 * Do a buffered write to an IJ matrix.
 * That is, write to the buffer, until the buffer is full.  Then when the
 * buffer is full, write to the IJ matrix and reset the buffer counters
 * In effect, this buffers this operation
 *   A[row_to_write, col_to_write] += val_to_write
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferWrite( HYPRE_IJMatrix B,                 /* Unassembled matrix to add an entry to */
                                HYPRE_Int     *ijbuf_cnt,         /* current buffer size */
                                HYPRE_Int      ijbuf_size,        /* max buffer size */
                                HYPRE_Int     *ijbuf_rowcounter,  /* num of rows in rownums, (i.e., size of rownums) */
                                /* This counter will increase as you call this function for multiple rows */
                                HYPRE_Real    **ijbuf_data,       /* Array of values, of size ijbuf_size */
                                HYPRE_BigInt  **ijbuf_cols,       /* Array of col indices, of size ijbuf_size */
                                HYPRE_BigInt  **ijbuf_rownums,    /* Holds row-indices that with numcols makes for a CSR-like data structure*/
                                HYPRE_Int     **ijbuf_numcols,    /* rownums[i] is the row num, and numcols holds the number of entries being added */
                                /* for that row.  Note numcols is not cumulative like an actual CSR data structure*/
                                HYPRE_BigInt   row_to_write,      /* Entry to add to the buffer */
                                HYPRE_BigInt   col_to_write,      /* Ditto */
                                HYPRE_Real     val_to_write )     /* Ditto */
{
   HYPRE_Int ierr = 0;

   if( (*ijbuf_cnt) == 0 )
   {
      /* brand new buffer: increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }
   else if((*ijbuf_rownums)[ (*ijbuf_rowcounter)-1 ] != row_to_write)
   {
      /* If this is a new row, compress the previous row */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      /* increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow( (*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   /* Add new entry to buffer */
   (*ijbuf_cols)[(*ijbuf_cnt)] = col_to_write;
   (*ijbuf_data)[(*ijbuf_cnt)] = val_to_write;
   (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ]++;
   (*ijbuf_cnt)++;

   /* Buffer is full, write to the matrix object */
   /* Flush one slot early (ijbuf_size-1): NewRow below may need a spare
    * row/column slot after the reset. */
   if ( (*ijbuf_cnt) == (ijbuf_size-1) )
   {
      /* If the last row is empty, decrement rowcounter */
      if( (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ] == 0)
      {   (*ijbuf_rowcounter)--; }

      /* Compress and Add Entries */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, *ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));

      /* Reinitialize the buffer */
      hypre_NonGalerkinIJBufferInit( ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   return ierr;
}
/**
* Empty the IJ Buffer with a final AddToValues.
**/
/* Flush whatever remains in the IJ write buffer into matrix B with one last
 * AddToValues, then zero the entry counter.  See
 * hypre_NonGalerkinIJBufferWrite for the buffer layout. */
HYPRE_Int
hypre_NonGalerkinIJBufferEmpty(HYPRE_IJMatrix B, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int      ijbuf_size,
                               HYPRE_Int     *ijbuf_cnt,
                               HYPRE_Int      ijbuf_rowcounter,
                               HYPRE_Real    **ijbuf_data,
                               HYPRE_BigInt  **ijbuf_cols,
                               HYPRE_BigInt  **ijbuf_rownums,
                               HYPRE_Int     **ijbuf_numcols)
{
   HYPRE_Int ierr = 0;

   if ((*ijbuf_cnt) > 0)
   {
      /* Compress the trailing row, de-duplicate the whole buffer, then
       * hand everything to the IJ matrix. */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, &ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
   }
   *ijbuf_cnt = 0;

   return ierr;
}
/*
* Construct sparsity pattern based on R_I A P, plus entries required by drop tolerance
*/
hypre_ParCSRMatrix *
hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP,
hypre_ParCSRMatrix *RAP,
HYPRE_Int * CF_marker,
HYPRE_Real droptol,
HYPRE_Int sym_collapse,
HYPRE_Int collapse_beta )
{
/* MPI Communicator */
MPI_Comm comm = hypre_ParCSRMatrixComm(RAP);
/* Declare R_IAP */
hypre_CSRMatrix *R_IAP_diag = hypre_ParCSRMatrixDiag(R_IAP);
HYPRE_Int *R_IAP_diag_i = hypre_CSRMatrixI(R_IAP_diag);
HYPRE_Int *R_IAP_diag_j = hypre_CSRMatrixJ(R_IAP_diag);
hypre_CSRMatrix *R_IAP_offd = hypre_ParCSRMatrixOffd(R_IAP);
HYPRE_Int *R_IAP_offd_i = hypre_CSRMatrixI(R_IAP_offd);
HYPRE_Int *R_IAP_offd_j = hypre_CSRMatrixJ(R_IAP_offd);
HYPRE_BigInt *col_map_offd_R_IAP = hypre_ParCSRMatrixColMapOffd(R_IAP);
/* Declare RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
HYPRE_BigInt last_col_diag_RAP = first_col_diag_RAP + (HYPRE_BigInt)num_cols_diag_RAP - 1;
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
/* Declare A */
HYPRE_Int num_fine_variables = hypre_CSRMatrixNumRows(R_IAP_diag);
/* Declare IJ matrices */
HYPRE_IJMatrix Pattern;
hypre_ParCSRMatrix *Pattern_CSR = NULL;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Other Declarations */
HYPRE_Int ierr = 0;
HYPRE_Real max_entry = 0.0;
HYPRE_Real max_entry_offd = 0.0;
HYPRE_Int * rownz = NULL;
HYPRE_Int i, j, Cpt;
HYPRE_BigInt row_start, row_end, global_row, global_col;
/* Other Setup */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
/*
* Initialize the IJ matrix, leveraging our rough knowledge of the
* nonzero structure of Pattern based on RAP
*
* ilower, iupper, jlower, jupper */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP, last_col_diag_RAP, &Pattern);
ierr += HYPRE_IJMatrixSetObjectType(Pattern, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for(i = 0; i < num_variables; i++)
{ rownz[i] = 1.2*(RAP_diag_i[i+1] - RAP_diag_i[i]) + 1.2*(RAP_offd_i[i+1] - RAP_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(Pattern, rownz);
ierr += HYPRE_IJMatrixInitialize(Pattern);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
*For efficiency, we do a buffered IJAddToValues.
* Here, we initialize the buffer and then initialize the buffer counters
*/
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if(sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Place entries in R_IAP into Pattern
*/
Cpt = -1; /* Cpt contains the fine grid index of the i-th Cpt */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
/* Find the next Coarse Point in CF_marker */
for(j = Cpt+1; j < num_fine_variables; j++)
{
if(CF_marker[j] == 1) /* Found Next C-point */
{
Cpt = j;
break;
}
}
/* Diag Portion */
row_start = R_IAP_diag_i[Cpt];
row_end = R_IAP_diag_i[Cpt+1];
for(j = row_start; j < row_end; j++)
{
global_col = R_IAP_diag_j[j] + first_col_diag_RAP;
/* This call adds a 1 x 1 to i j data */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0);
}
}
/* Offdiag Portion */
row_start = R_IAP_offd_i[Cpt];
row_end = R_IAP_offd_i[Cpt+1];
for(j = row_start; j < row_end; j++)
{
global_col = col_map_offd_R_IAP[ R_IAP_offd_j[j] ];
/* This call adds a 1 x 1 to i j data */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0);
}
}
}
/*
* Use drop-tolerance to compute new entries for sparsity pattern
*/
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE
#endif */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
/* Compute the drop tolerance for this row, which is just
* abs(max of row i)*droptol */
max_entry = -1.0;
for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
{
if( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )
{ max_entry = fabs(RAP_diag_data[j]); }
}
for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
{
{
if( max_entry < fabs(RAP_offd_data[j]) )
{ max_entry = fabs(RAP_offd_data[j]); }
}
}
max_entry *= droptol;
max_entry_offd = max_entry*collapse_beta;
/* Loop over diag portion, adding all entries that are "strong" */
for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
{
if( fabs(RAP_diag_data[j]) > max_entry )
{
global_col = RAP_diag_j[j] + first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues
* A[global_row, global_col] += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0 );
if(sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0 );
}
/*}*/
}
}
/* Loop over offd portion, adding all entries that are "strong" */
for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
{
if( fabs(RAP_offd_data[j]) > max_entry_offd )
{
global_col = col_map_offd_RAP[ RAP_offd_j[j] ];
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues
* A[global_row, global_col] += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0 );
if(sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0 );
}
/*}*/
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if(sym_collapse)
hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Finalize Construction of Pattern */
ierr += HYPRE_IJMatrixAssemble(Pattern);
ierr += HYPRE_IJMatrixGetObject( Pattern, (void**) &Pattern_CSR );
/* Deallocate */
ierr += HYPRE_IJMatrixSetObjectType(Pattern, -1);
ierr += HYPRE_IJMatrixDestroy(Pattern);
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if(sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
return Pattern_CSR;
}
HYPRE_Int
hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr,
hypre_ParCSRMatrix *AP,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int * dof_func_value,
HYPRE_Int * CF_marker,
HYPRE_Real droptol, HYPRE_Int sym_collapse,
HYPRE_Real lump_percent, HYPRE_Int collapse_beta )
{
/* Initializations */
MPI_Comm comm = hypre_ParCSRMatrixComm(*RAP_ptr);
hypre_ParCSRMatrix *S = NULL;
hypre_ParCSRMatrix *RAP = *RAP_ptr;
HYPRE_Int i, j, k, row_start, row_end, value, num_cols_offd_Sext, num_procs;
HYPRE_Int S_ext_diag_size, S_ext_offd_size, last_col_diag_RAP, cnt_offd, cnt_diag, cnt;
HYPRE_Int col_indx_Pattern, current_Pattern_j, col_indx_RAP;
/* HYPRE_Real start_time = hypre_MPI_Wtime(); */
/* HYPRE_Real end_time; */
HYPRE_BigInt *temp = NULL;
HYPRE_Int ierr = 0;
char filename[256];
/* Lumping related variables */
HYPRE_IJMatrix ijmatrix;
HYPRE_BigInt * Pattern_offd_indices = NULL;
HYPRE_BigInt * S_offd_indices = NULL;
HYPRE_BigInt * offd_intersection = NULL;
HYPRE_Real * offd_intersection_data = NULL;
HYPRE_Int * diag_intersection = NULL;
HYPRE_Real * diag_intersection_data = NULL;
HYPRE_Int Pattern_offd_indices_len = 0;
HYPRE_Int Pattern_offd_indices_allocated_len= 0;
HYPRE_Int S_offd_indices_len = 0;
HYPRE_Int S_offd_indices_allocated_len = 0;
HYPRE_Int offd_intersection_len = 0;
HYPRE_Int offd_intersection_allocated_len = 0;
HYPRE_Int diag_intersection_len = 0;
HYPRE_Int diag_intersection_allocated_len = 0;
HYPRE_Real intersection_len = 0;
HYPRE_Int * Pattern_indices_ptr = NULL;
HYPRE_Int Pattern_diag_indices_len = 0;
HYPRE_Int global_row = 0;
HYPRE_Int has_row_ended = 0;
HYPRE_Real lump_value = 0.;
HYPRE_Real diagonal_lump_value = 0.;
HYPRE_Real neg_lump_value = 0.;
HYPRE_Real sum_strong_neigh = 0.;
HYPRE_Int * rownz = NULL;
/* offd and diag portions of RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(RAP);
/* offd and diag portions of S */
hypre_CSRMatrix *S_diag = NULL;
HYPRE_Int *S_diag_i = NULL;
HYPRE_Real *S_diag_data = NULL;
HYPRE_Int *S_diag_j = NULL;
hypre_CSRMatrix *S_offd = NULL;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Real *S_offd_data = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd_S = NULL;
HYPRE_Int num_cols_offd_S;
/* HYPRE_Int num_nonzeros_S_diag; */
/* off processor portions of S */
hypre_CSRMatrix *S_ext = NULL;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Real *S_ext_data = NULL;
HYPRE_BigInt *S_ext_j = NULL;
HYPRE_Int *S_ext_diag_i = NULL;
HYPRE_Real *S_ext_diag_data = NULL;
HYPRE_Int *S_ext_diag_j = NULL;
HYPRE_Int *S_ext_offd_i = NULL;
HYPRE_Real *S_ext_offd_data = NULL;
HYPRE_Int *S_ext_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Sext = NULL;
/* HYPRE_Int num_nonzeros_S_ext_diag;
HYPRE_Int num_nonzeros_S_ext_offd;
HYPRE_Int num_rows_Sext = 0; */
HYPRE_Int row_indx_Sext = 0;
/* offd and diag portions of Pattern */
hypre_ParCSRMatrix *Pattern = NULL;
hypre_CSRMatrix *Pattern_diag = NULL;
HYPRE_Int *Pattern_diag_i = NULL;
HYPRE_Real *Pattern_diag_data = NULL;
HYPRE_Int *Pattern_diag_j = NULL;
hypre_CSRMatrix *Pattern_offd = NULL;
HYPRE_Int *Pattern_offd_i = NULL;
HYPRE_Real *Pattern_offd_data = NULL;
HYPRE_Int *Pattern_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pattern = NULL;
HYPRE_Int num_cols_Pattern_offd;
HYPRE_Int my_id;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Further Initializations */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Compute Sparsity Pattern */
Pattern = hypre_NonGalerkinSparsityPattern(AP, RAP, CF_marker, droptol, sym_collapse, collapse_beta);
Pattern_diag = hypre_ParCSRMatrixDiag(Pattern);
Pattern_diag_i = hypre_CSRMatrixI(Pattern_diag);
Pattern_diag_data = hypre_CSRMatrixData(Pattern_diag);
Pattern_diag_j = hypre_CSRMatrixJ(Pattern_diag);
Pattern_offd = hypre_ParCSRMatrixOffd(Pattern);
Pattern_offd_i = hypre_CSRMatrixI(Pattern_offd);
Pattern_offd_j = hypre_CSRMatrixJ(Pattern_offd);
col_map_offd_Pattern = hypre_ParCSRMatrixColMapOffd(Pattern);
num_cols_Pattern_offd = hypre_CSRMatrixNumCols(Pattern_offd);
if (num_cols_Pattern_offd)
{ Pattern_offd_data = hypre_CSRMatrixData(Pattern_offd); }
/**
* Fill in the entries of Pattern with entries from RAP
**/
/* First, sort column indices in RAP and Pattern */
for(i = 0; i < num_variables; i++)
{
/* The diag matrices store the diagonal as first element in each row.
* We maintain that for the case of Pattern and RAP, because the
* strength of connection routine relies on it and we need to ignore
* diagonal entries in Pattern later during set intersections.
* */
/* Sort diag portion of RAP */
row_start = RAP_diag_i[i];
if( RAP_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = RAP_diag_i[i+1];
hypre_qsort1(RAP_diag_j, RAP_diag_data, row_start, row_end-1 );
/* Sort diag portion of Pattern */
row_start = Pattern_diag_i[i];
if( Pattern_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort offd portion of RAP */
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
hypre_qsort1(RAP_offd_j, RAP_offd_data, row_start, row_end-1 );
/* Sort offd portion of Pattern */
/* Be careful to map coarse dof i with CF_marker into Pattern */
row_start = Pattern_offd_i[i];
row_end = Pattern_offd_i[i+1];
hypre_qsort1(Pattern_offd_j, Pattern_offd_data, row_start, row_end-1 );
}
/* Create Strength matrix based on RAP or Pattern. If Pattern is used,
* then the SortedCopyParCSRData(...) function call must also be commented
* back in */
/* hypre_SortedCopyParCSRData(RAP, Pattern); */
if(0)
{
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum, */
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
num_functions, dof_func_value, &S);
}
else
{
/* Passing in "1, NULL" because dof_array is not needed
* because we assume that the number of functions is 1 */
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum,*/
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
1, NULL, &S);
}
/* Grab diag and offd parts of S */
S_diag = hypre_ParCSRMatrixDiag(S);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
S_offd_i = hypre_CSRMatrixI(S_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
/* num_nonzeros_S_diag = S_diag_i[num_variables]; */
/* Grab part of S that is distance one away from the local rows
* This is needed later for the stencil collapsing. This section
* of the code mimics par_rap.c when it extracts Ps_ext.
* When moving from par_rap.c, the variable name changes were:
* A --> RAP
* P --> S
* Ps_ext --> S_ext
* P_ext_diag --> S_ext_diag
* P_ext_offd --> S_ext_offd
*
* The data layout of S_ext as returned by ExtractBExt gives you only global
* column indices, and must be converted to the local numbering. This code
* section constructs S_ext_diag and S_ext_offd, which are the distance 1
* couplings in S based on the sparsity structure in RAP.
* --> S_ext_diag corresponds to the same column slice that RAP_diag
* corresponds to. Thus, the column indexing is the same as in
* RAP_diag such that S_ext_diag_j[k] just needs to be offset by
* the RAP_diag first global dof offset.
* --> S_ext_offd column indexing is a little more complicated, and
* requires the computation below of col_map_S_ext_offd, which
* maps the local 0,1,2,... column indexing in S_ext_offd to global
* dof numbers. Note, that the num_cols_RAP_offd is NOT equal to
* num_cols_offd_S_ext
* --> The row indexing of S_ext_diag|offd is as follows. Use
* col_map_offd_RAP, where the first index corresponds to the
* first global row index in S_ext_diag|offd. Remember that ExtractBExt
* grabs the information from S required for locally computing
* (RAP*S)[proc_k row slice, :] */
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,RAP,1);
S_ext_data = hypre_CSRMatrixData(S_ext);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
}
/* This uses the num_cols_RAP_offd to set S_ext_diag|offd_i, because S_ext
* is the off-processor information needed to compute RAP*S. That is,
* num_cols_RAP_offd represents the number of rows needed from S_ext for
* the multiplication */
S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST);
S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST);
S_ext_diag_size = 0;
S_ext_offd_size = 0;
/* num_rows_Sext = num_cols_RAP_offd; */
last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1;
/* construct the S_ext_diag and _offd row-pointer arrays by counting elements
* This looks to create offd and diag blocks related to the local rows belonging
* to this processor...we may not need to split up S_ext this way...or we could.
* It would make for faster binary searching and set intersecting later...this will
* be the bottle neck so LETS SPLIT THIS UP Between offd and diag*/
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
S_ext_offd_size++;
else
S_ext_diag_size++;
S_ext_diag_i[i+1] = S_ext_diag_size;
S_ext_offd_i[i+1] = S_ext_offd_size;
}
if (S_ext_diag_size)
{
S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST);
S_ext_diag_data = hypre_CTAlloc(HYPRE_Real, S_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST);
S_ext_offd_data = hypre_CTAlloc(HYPRE_Real, S_ext_offd_size, HYPRE_MEMORY_HOST);
}
/* This copies over the column indices into the offd and diag parts.
 * The diag portion has its local column indices shifted to start at 0.
 * The offd portion requires more work to construct the col_map_offd array
 * and a local column ordering. */
cnt_offd = 0;
cnt_diag = 0;
cnt = 0;
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
{
S_ext_offd_data[cnt_offd] = S_ext_data[j];
//S_ext_offd_j[cnt_offd++] = S_ext_j[j];
S_ext_j[cnt_offd++] = S_ext_j[j];
}
else
{
S_ext_diag_data[cnt_diag] = S_ext_data[j];
S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(S_ext_j[j] - first_col_diag_RAP);
}
}
/* This creates col_map_offd_Sext */
if (S_ext_offd_size || num_cols_offd_S)
{
temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_cols_offd_S, HYPRE_MEMORY_HOST);
for (i=0; i < S_ext_offd_size; i++)
temp[i] = S_ext_j[i];
cnt = S_ext_offd_size;
for (i=0; i < num_cols_offd_S; i++)
temp[cnt++] = col_map_offd_S[i];
}
if (cnt)
{
/* after this, the first so many entries of temp will hold the
* unique column indices in S_ext_offd_j unioned with the indices
* in col_map_offd_S */
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_Sext = 1;
value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Sext++] = value;
}
}
}
else
{
num_cols_offd_Sext = 0;
}
/* num_nonzeros_S_ext_diag = cnt_diag;
num_nonzeros_S_ext_offd = S_ext_offd_size; */
if (num_cols_offd_Sext)
col_map_offd_Sext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Sext, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_Sext; i++)
col_map_offd_Sext[i] = temp[i];
if (S_ext_offd_size || num_cols_offd_S)
hypre_TFree(temp, HYPRE_MEMORY_HOST);
/* look for S_ext_offd_j[i] in col_map_offd_Sext, and set S_ext_offd_j[i]
* to the index of that column value in col_map_offd_Sext */
for (i=0 ; i < S_ext_offd_size; i++)
S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Sext,
S_ext_j[i],
num_cols_offd_Sext);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(S_ext);
S_ext = NULL;
}
/* Need to sort column indices in S and S_ext */
for(i = 0; i < num_variables; i++)
{
/* Re-Sort diag portion of Pattern, placing the diagonal entry in a
* sorted position */
row_start = Pattern_diag_i[i];
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort diag portion of S, noting that no diagonal entry */
/* S has no "data" array...it's just NULL */
row_start = S_diag_i[i];
row_end = S_diag_i[i+1];
hypre_qsort1(S_diag_j, S_diag_data, row_start, row_end-1 );
/* Sort offd portion of S */
/* S has no "data" array...it's just NULL */
row_start = S_offd_i[i];
row_end = S_offd_i[i+1];
hypre_qsort1(S_offd_j, S_offd_data, row_start, row_end-1 );
}
/* Sort S_ext
* num_cols_RAP_offd equals num_rows for S_ext*/
for(i = 0; i < num_cols_RAP_offd; i++)
{
/* Sort diag portion of S_ext */
row_start = S_ext_diag_i[i];
row_end = S_ext_diag_i[i+1];
hypre_qsort1(S_ext_diag_j, S_ext_diag_data, row_start, row_end-1 );
/* Sort offd portion of S_ext */
row_start = S_ext_offd_i[i];
row_end = S_ext_offd_i[i+1];
hypre_qsort1(S_ext_offd_j, S_ext_offd_data, row_start, row_end-1 );
}
/*
* Now, for the fun stuff -- Computing the Non-Galerkin Operator
*/
/* Initialize the ijmatrix, leveraging our knowledge of the nonzero
* structure in Pattern */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP,
first_col_diag_RAP, last_col_diag_RAP, &ijmatrix);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for(i = 0; i < num_variables; i++)
{ rownz[i] = 1.2*(Pattern_diag_i[i+1] - Pattern_diag_i[i]) + 1.2*(Pattern_offd_i[i+1] - Pattern_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(ijmatrix, rownz);
ierr += HYPRE_IJMatrixInitialize(ijmatrix);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
*For efficiency, we do a buffered IJAddToValues.
* Here, we initialize the buffer and then initialize the buffer counters
*/
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if(sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Eliminate Entries In RAP_diag
* */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_diag_i[i];
row_end = RAP_diag_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
/* Grab pointer to current entry in Pattern_diag */
current_Pattern_j = Pattern_diag_i[i];
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping */
/* Ensure adequate length */
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if(Pattern_offd_indices_allocated_len < Pattern_offd_indices_len)
{
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
Pattern_offd_indices = hypre_CTAlloc(HYPRE_BigInt, Pattern_offd_indices_len, HYPRE_MEMORY_HOST);
Pattern_offd_indices_allocated_len = Pattern_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of Pattern_offd_j */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
col_indx_RAP = RAP_diag_j[j];
/* Ignore zero entries in RAP */
if( RAP_diag_data[j] != 0.0)
{
/* Don't change the diagonal, just write it */
if(col_indx_RAP == i)
{
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, global_row] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, RAP_diag_data[j] );
/*}*/
}
/* The entry in RAP does not appear in Pattern, so LUMP it */
else if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* Lump entry (i, col_indx_RAP) in RAP */
/* Grab the indices for row col_indx_RAP of S_offd and diag. This will
* be for computing lumping locations */
S_offd_indices_len = S_offd_i[col_indx_RAP+1] - S_offd_i[col_indx_RAP];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_offd_j */
hypre_GrabSubArray(S_offd_j, S_offd_i[col_indx_RAP], S_offd_i[col_indx_RAP+1]-1,
col_map_offd_S, S_offd_indices);
/* No need to grab info out of S_diag_j[...], here we just start from
* S_diag_i[col_indx_RAP] and end at index S_diag_i[col_indx_RAP+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
/* This intersection also tracks S_offd_data and assumes that
* S_offd_indices is the first argument here */
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_offd_data[ S_offd_i[col_indx_RAP] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. Note that S_diag_j does
* not have a diagonal entry, so no lumping occurs to the diagonal. */
cnt = hypre_max(Pattern_diag_indices_len,
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
/* There is no diagonal entry in first position of S */
hypre_IntersectTwoArrays( &(S_diag_j[S_diag_i[col_indx_RAP]]),
&(S_diag_data[ S_diag_i[col_indx_RAP] ]),
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_diag_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* col_indx_RAP in S, corresponding to the indices we are
* collapsing to in row i This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_diag_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, cnt] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (lump_percent < 1.0)
{
/* Preserve row sum by updating diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if(sym_collapse)
{
/* Update mirror entry */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
/* Update mirror entry diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
/*}*/
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_diag_data[j]; }
else
{ lump_value = RAP_diag_data[j]; }
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if(col_indx_RAP == col_indx_Pattern)
{
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, RAP_diag_data[j] );
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_diag_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_diag_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
/*
* Eliminate Entries In RAP_offd
* Structure of this for-loop is very similar to the RAP_diag for-loop
 * But, not so similar that these loops should be combined into a single function.
* */
if(num_cols_RAP_offd)
{
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
current_Pattern_j = Pattern_offd_i[i];
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if( (Pattern_offd_j != NULL) && (Pattern_offd_indices_len > 0) )
{ col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; }
else
{ /* if Pattern_offd_j is not allocated or this is a zero length row,
then all entries need to be lumped.
This is an analogous situation to has_row_ended=1. */
col_indx_Pattern = -1;
has_row_ended = 1;
}
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping. The above
* loop over RAP_diag ensures adequate length of Pattern_offd_indices */
/* Ensure adequate length */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
/* Ignore zero entries in RAP */
if( RAP_offd_data[j] != 0.0)
{
/* In general for all the offd_j arrays, we have to indirectly
* index with the col_map_offd array to get a global index */
col_indx_RAP = col_map_offd_RAP[ RAP_offd_j[j] ];
/* The entry in RAP does not appear in Pattern, so LUMP it */
if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* The row_indx_Sext would be found with:
row_indx_Sext = hypre_BinarySearch(col_map_offd_RAP, col_indx_RAP, num_cols_RAP_offd);
But, we already know the answer to this with, */
row_indx_Sext = RAP_offd_j[j];
/* Grab the indices for row row_indx_Sext from the offd and diag parts. This will
* be for computing lumping locations */
S_offd_indices_len = S_ext_offd_i[row_indx_Sext+1] - S_ext_offd_i[row_indx_Sext];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_ext_offd_j */
hypre_GrabSubArray(S_ext_offd_j, S_ext_offd_i[row_indx_Sext], S_ext_offd_i[row_indx_Sext+1]-1,
col_map_offd_Sext, S_offd_indices);
/* No need to grab info out of S_ext_diag_j[...], here we just start from
* S_ext_diag_i[row_indx_Sext] and end at index S_ext_diag_i[row_indx_Sext+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_ext_offd_data[ S_ext_offd_i[row_indx_Sext] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. */
cnt = hypre_max(Pattern_diag_indices_len,
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
hypre_IntersectTwoArrays( &(S_ext_diag_j[S_ext_diag_i[row_indx_Sext]]),
&(S_ext_diag_data[ S_ext_diag_i[row_indx_Sext] ]),
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_offd_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* row_indx_Sext in S, corresponding to the indices we are
* collapsing to in row i. This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_offd_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value);
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_offd_data[j]; }
else
{ lump_value = RAP_offd_data[j]; }
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, col_indx_RAP, global_row,
lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if (col_indx_RAP == col_indx_Pattern)
{
/* For the offd structure, col_indx_RAP is a global dof number */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
RAP_offd_data[j]);
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_offd_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_offd_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if(sym_collapse)
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Assemble non-Galerkin Matrix, and overwrite current RAP*/
ierr += HYPRE_IJMatrixAssemble (ijmatrix);
ierr += HYPRE_IJMatrixGetObject( ijmatrix, (void**) RAP_ptr);
/* Optional diagnostic matrix printing */
if (0)
{
hypre_sprintf(filename, "Pattern_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(Pattern, 0, 0, filename);
hypre_sprintf(filename, "Strength_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(S, 0, 0, filename);
hypre_sprintf(filename, "RAP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(RAP, 0, 0, filename);
hypre_sprintf(filename, "RAPc_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(*RAP_ptr, 0, 0, filename);
hypre_sprintf(filename, "AP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(AP, 0, 0, filename);
}
/* Free matrices and variables and arrays */
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if(sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
if (S_ext_diag_size)
{
hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_data, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Sext)
{ hypre_TFree(col_map_offd_Sext, HYPRE_MEMORY_HOST); }
ierr += hypre_ParCSRMatrixDestroy(Pattern);
ierr += hypre_ParCSRMatrixDestroy(RAP);
ierr += hypre_ParCSRMatrixDestroy(S);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, -1);
ierr += HYPRE_IJMatrixDestroy(ijmatrix);
/*end_time = hypre_MPI_Wtime();
if(my_id == 0)
{ fprintf(stdout, "NonGalerkin Time: %1.2e\n", end_time-start_time); } */
return ierr;
}
|
GB_unop__isinf_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__isinf_bool_fc32
// op(A') function: GB_unop_tran__isinf_bool_fc32
// C type: bool
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = (aij)
// unaryop: cij = GB_cisinff (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisinff (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = (aij) ; \
Cx [pC] = GB_cisinff (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__isinf_bool_fc32
(
    bool *Cx,                   // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = isinf (Ax [p]) for all anz entries, in parallel.  The cast
    // from the A type to the operator input type is the identity here.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = GB_cisinff (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__isinf_bool_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
// Phase 2 of 2 of the transpose template: the included file performs the
// transpose, typecast, and unary-op application using the GB_* macros
// defined above (GB_GETA, GB_CAST_OP, etc.) and the per-slice Rowcounts.
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
/* Allocate and zero-initialize the cache structure; failure is fatal. */
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);  /* no backing file open yet */
cache_info->id=GetMagickThreadId();
/*
Thread count is the largest of: the requested count, the OpenMP maximum,
and the ThreadResource limit; guaranteed to be at least one.
*/
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/*
The "cache:synchronize" policy is read after the MAGICK_SYNCHRONIZE
environment variable, so a set policy value takes precedence.  Both
returned strings are owned (and freed) here.
*/
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
cache_info->width_limit=GetMagickResourceLimit(WidthResource);
cache_info->height_limit=GetMagickResourceLimit(HeightResource);
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    *nexus_pool,
    **magick_restrict nexus_info;

  register ssize_t
    id;

  /*
    One pointer table over one contiguous, zeroed pool of 2*number_threads
    entries: entries [0,number_threads) are the primary nexuses, each paired
    with a virtual nexus drawn from the second half of the pool.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_pool=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(*nexus_pool));
  if (nexus_pool == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(nexus_pool,0,2*number_threads*sizeof(*nexus_pool));
  *nexus_info=nexus_pool;
  for (id=0; id < (ssize_t) (2*number_threads); id++)
  {
    nexus_info[id]=nexus_pool+id;
    if (id < (ssize_t) number_threads)
      nexus_info[id]->virtual_nexus=nexus_pool+number_threads+id;
    nexus_info[id]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a pointer to the in-core pixel store and its length; only memory-
    and map-backed caches expose their pixels directly (otherwise NULL with
    *length set to 0).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Lazily create the global cache semaphore; a no-op when it already exists.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
/*
If genesis never ran, activate the semaphore first so the relinquish
below has a valid object to destroy.
*/
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no op-- nothing to destroy */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask: composite the nexus pixels (q) over the authentic
    pixels (p) weighted by the image's write mask.  Returns MagickTrue on
    success (including the no-op cases), MagickFalse on failure.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No write mask channel: nothing to clip. */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  /* Empty region: nothing to clip. */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p walks the authentic pixels for the region; q walks the nexus copy. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      register ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      /* Composite only where the mask is non-zero (within epsilon). */
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            /* Only channels flagged for update participate. */
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Clone a pixel cache: acquire a fresh cache with the same thread count
    and carry over the virtual pixel method.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info,
    *magick_restrict cache_info;

  /*
    Copy the method table from `cache' into `clone'.  (Locals renamed to
    match their roles: `clone_info' is the destination being modified.)
  */
  assert(clone != (Cache) NULL);
  clone_info=(CacheInfo *) clone;
  assert(clone_info->signature == MagickCoreSignature);
  if (clone_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      clone_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  clone_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* Rewind both files before the byte-for-byte copy. */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Chunk size: cap at the source file size when it is known. */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    /* Short write: abandon the loop; the length check below reports it. */
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* Success only if every byte of the source cache was transferred. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  /*
    Copy the pixels (and metacontent) of cache_info into clone_info.  Fast
    paths exist for caches with identical morphology; otherwise pixels are
    copied row by row through per-thread nexuses, remapping channels when
    the channel maps differ.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* A ping cache carries no pixel payload; nothing to copy. */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /* Both caches are directly addressable: single memcpy each. */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      /* Disk-to-disk clones stream the backing files directly. */
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* Identical channel maps allow a block copy per row. */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  /* Copy length per row: the smaller of the two row widths, in Quantums. */
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    /* A failure in any iteration short-circuits the remaining rows. */
    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /* Stage one source row in this thread's read nexus. */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /* Stage the destination row in this thread's write nexus. */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            /* Pull each clone channel from the matching source offset. */
            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache, if it has one.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Deallocate the pixel cache, delegating to a registered destroy handler
    when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  /*
    Close the descriptor backing a disk cache and release its file
    resource.  Returns MagickFalse when close(2) fails or when there was
    no open descriptor.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the storage behind the pixel cache according to its backing
    type, then reset the cache to the undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* Pixels are owned by the OpenCL cache info; release through it. */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      /* Heap-backed vs. map-backed memory cache. */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above — MapCache falls through to DiskCache,
       apparently so the map's backing disk file is closed and its disk
       resource relinquished as well; confirm this is intentional. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the cache lock; only the final reference
    actually destroys the cache.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Release pixel storage, server info, nexuses, RNG state, and locks. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Poison the signature so stale pointers are caught by the asserts. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
%        const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the nexus staging buffer (heap or mapped) and reset every
    bookkeeping field.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /* Two nexus slots exist per thread index (note the 2*number_threads
     bound); release each slot's pixel buffer and poison its signature. */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /*
    NOTE(review): only *nexus_info (element 0) is freed, not each element —
    the NexusInfo structs appear to be one contiguous allocation anchored
    there; confirm against AcquirePixelCacheNexus.
  */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent staged by the most recent authentic pixel
    request, delegating to an installed handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    return(cache_info->methods.get_authentic_metacontent_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    Ensure the image owns a fully-defined, unshared cache before exposing
    its pixels to OpenCL.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only unmapped in-memory caches can back an OpenCL buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* Re-home the CL cache info when it belongs to a different context. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
    (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* Retain the buffer under the lock; caller owns the added reference. */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Stage the requested region in the nexus, then fault pixels (and
    metacontent, when present) in from the backing store unless the nexus
    already aliases the authentic pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Authentic pixels from the last QueueAuthenticPixels() or
    GetAuthenticPixels() call; delegates to an installed handler when set.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent, call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region, delegating to an installed handler
    when one is registered; otherwise go through the thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default get-authentic-pixels handler: fetch the region through the
    calling thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the last
% call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  /*
    Extent (in pixels) of the region made available to this thread by the
    last QueueAuthenticPixels()/GetAuthenticPixels() call.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  /*
    Verify the image and its pixel cache still agree on storage class,
    colorspace, geometry, and channel layout.  Checks run in the same
    order as the short-circuit chain they replace.
  */
  const CacheInfo
    *magick_restrict info;

  info=(CacheInfo *) image->cache;
  if ((image->storage_class != info->storage_class) ||
      (image->colorspace != info->colorspace) ||
      (image->alpha_trait != info->alpha_trait) ||
      (image->channels != info->channels))
    return(MagickFalse);
  if ((image->columns != info->columns) || (image->rows != info->rows))
    return(MagickFalse);
  if (image->number_channels != info->number_channels)
    return(MagickFalse);
  /* Channel maps must match element-for-element. */
  if (memcmp(image->channel_map,info->channel_map,
       image->number_channels*sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != info->metacontent_extent)
    return(MagickFalse);
  if (info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide throttle/time-limit state, resolved lazily on first call.
    NOTE(review): these statics are written without synchronization;
    presumably benign races are tolerated here -- confirm upstream intent.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* Throttle: sleep once every 32 calls when a throttle limit is set. */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* Time limit exceeded: close any disk cache file and abort. */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      /* Re-check the condition now that the cache semaphore is held. */
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Adopt the clone; schedule the original for teardown. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  /*
    Report the backing-store type of the image's pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  /*
    Copy one pixel from cache channel order into caller channel order.
    A NULL source substitutes the image background color and returns
    MagickFalse.
  */
  register ssize_t
    n;

  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
    destination[GetPixelChannelChannel(image,n)]=source[n];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Return the single authentic pixel at (x,y); pixel is zeroed first so
    a failed read still yields a defined result.
  */
  CacheInfo
    *magick_restrict info;

  register Quantum
    *magick_restrict p;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_authentic_pixel_from_handler ==
      (GetOneAuthenticPixelFromHandler) NULL)
    {
      p=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
      return(CopyPixel(image,p,pixel));
    }
  return(info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Read one authentic pixel through this thread's cache nexus; pixel is
    zeroed so failure leaves a defined result.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Return the single virtual pixel at (x,y), delegating to a registered
    handler when one is installed; pixel is zeroed first.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_virtual_pixel_from_handler ==
      (GetOneVirtualPixelFromHandler) NULL)
    {
      assert(thread_id < (int) info->number_threads);
      p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
        1UL,1UL,info->nexus_info[thread_id],exception);
      return(CopyPixel(image,p,pixel));
    }
  return(info->methods.get_one_virtual_pixel_from_handler(image,
    GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Read one virtual pixel through this thread's cache nexus using the
    supplied virtual pixel method; pixel is zeroed first.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  /*
    Return the virtual pixel at (x,y) as a PixelInfo; MagickFalse when the
    pixel cannot be read (pixel then holds the default PixelInfo).
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  /*
    Colorspace recorded in the pixel cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default built-in pixel cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* Authentic (writable) pixel access. */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /* Virtual (read-only) pixel access. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* Queue, synchronize, and teardown. */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Pixel count of the nexus region; the full cache extent is reported
    when the region is empty.
  */
  CacheInfo
    *magick_restrict info;

  MagickSizeType
    count;

  assert(cache != NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  count=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (count != 0)
    return(count);
  return((MagickSizeType) info->columns*info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  /*
    Expose the raw pixel buffer (length reported via *length).  Only
    memory- and map-backed caches have addressable pixels; other cache
    types yield NULL.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  /*
    Storage class (DirectClass or PseudoClass) recorded in the cache.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  /*
    Virtual-pixel policy for accesses outside the cache boundaries.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Meta-content for the specified cache nexus; NULL when the cache has
    an undefined storage class.
  */
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offsets used to jitter virtual pixel coordinates.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  /* Jitter x by its matrix entry (centered on -32), then clamp. */
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    return(0L);
  if (offset < (ssize_t) columns)
    return(offset);
  return((ssize_t) columns-1L);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  /* Jitter y by its matrix entry (centered on -32), then clamp. */
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    return(0L);
  if (offset < (ssize_t) rows)
    return(offset);
  return((ssize_t) rows-1L);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /* Clamp x into the valid column range [0, columns-1]. */
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /* Clamp y into the valid row range [0, rows-1]. */
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  /* Pseudo-random column index in [0, columns). */
  double
    value;

  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  /* Pseudo-random row index in [0, rows). */
  double
    value;

  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  /*
    Floored division of offset by extent: unlike C's truncating '/' and
    '%', the remainder is always in [0, extent).
  */
  MagickModulo
    result;

  result.quotient=offset/((ssize_t) extent);
  result.remainder=offset % ((ssize_t) extent);
  /* Signs differ and division was inexact: shift toward -infinity. */
  if ((result.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      result.quotient-=1;
      result.remainder+=((ssize_t) extent);
    }
  return(result);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents: synthesize the out-of-bounds
    pixels according to the virtual pixel method.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Initialize the constant virtual pixel used by these methods.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            BackgroundVirtualPixelMethod and the tiling methods use the
            image background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Fill the nexus row by row; within each row, in-bounds pixels are copied
    in runs and out-of-bounds pixels are synthesized one at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /*
                Odd tiles are reflected across the image boundary.
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* constant pixel prepared above */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            Copy the metacontent for the whole run: length pixels of
            metacontent_extent bytes each (previously only `length' bytes
            were copied, leaving the rest of the run's metacontent stale).
          */
          (void) memcpy(s,r,(size_t) (length*cache_info->metacontent_extent));
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Transfer the requested virtual pixel region through this thread's
    private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetVirtualPixelHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Acquire an immutable pixel region, preferring the cache's registered
    virtual-pixel handler over the default per-thread nexus path.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call to
% QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels most recently acquired into this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels of the given cache nexus, or NULL when the cache has
    no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blend pixel p (with alpha `alpha') over pixel q (with alpha `beta'),
  normalized by the composite alpha; returns p unchanged when alpha is
  fully opaque.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;

  Quantum
    pixel;

  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  /* reciprocal of the blended-alpha normalizer; PerceptibleReciprocal
     guards against division by values near zero */
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  mask_alpha=PerceptibleReciprocal(mask_alpha);
  pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta));
  return(pixel);
}
/*
  Blend the nexus pixels with the authentic pixels of the same region,
  weighted by the image's composite mask.  Returns MagickTrue on success
  (including when there is no composite mask or the region is empty).
*/
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* no composite mask: nothing to do */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region: nothing to do */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels for the region (read via the virtual
    nexus); q walks the nexus pixels, which are masked in place.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      register ssize_t
        i;

      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not flagged for update: leave untouched */
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Open (or create) the disk file that backs the pixel cache in the requested
  access mode.  Reuses the already-open descriptor when its mode matches;
  returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try exclusive create first; fall back to an existing file */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* try exclusive create first; fall back to an existing file */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);  /* release old descriptor */
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  Write `length' bytes from `buffer' to the pixel-cache file at `offset'.
  Returns the number of bytes written (which may be short on a hard error)
  or -1 if the initial seek fails.  Writes interrupted by EINTR are retried.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    /* pwrite() takes an explicit offset: no seek required */
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: return the short byte count */
      }
  }
  return(i);
}
/*
  Extend the disk-backed pixel cache file to at least `length' bytes and
  rewind it.  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that overflow a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      /* grow the file sparsely by writing one byte at length-1 */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally reserve the blocks up front when synchronize is set */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
source_info;
char
format[MagickPathExtent],
message[MagickPathExtent];
const char
*hosts,
*type;
MagickBooleanType
status;
MagickSizeType
length,
number_pixels;
size_t
columns,
packet_size;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (cache_anonymous_memory < 0)
{
char
*value;
/*
Does the security policy require anonymous mapping for pixel cache?
*/
cache_anonymous_memory=0;
value=GetPolicyValue("pixel-cache-memory");
if (value == (char *) NULL)
value=GetPolicyValue("cache:memory-map");
if (LocaleCompare(value,"anonymous") == 0)
{
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
cache_anonymous_memory=1;
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
"'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
}
value=DestroyString(value);
}
if ((image->columns == 0) || (image->rows == 0))
ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (((MagickSizeType) image->columns > cache_info->width_limit) ||
((MagickSizeType) image->rows > cache_info->height_limit))
ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
image->filename);
length=GetImageListLength(image);
if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
image->filename);
source_info=(*cache_info);
source_info.file=(-1);
(void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
image->filename,(double) image->scene);
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->alpha_trait=image->alpha_trait;
cache_info->channels=image->channels;
cache_info->rows=image->rows;
cache_info->columns=image->columns;
InitializePixelChannelMap(image);
cache_info->number_channels=GetPixelChannels(image);
(void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
sizeof(*image->channel_map));
cache_info->metacontent_extent=image->metacontent_extent;
cache_info->mode=mode;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
packet_size=cache_info->number_channels*sizeof(Quantum);
if (image->metacontent_extent != 0)
packet_size+=cache_info->metacontent_extent;
length=number_pixels*packet_size;
columns=(size_t) (length/cache_info->rows/packet_size);
if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
((ssize_t) cache_info->rows < 0))
ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
image->filename);
cache_info->length=length;
if (image->ping != MagickFalse)
{
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->type=PingCache;
return(MagickTrue);
}
status=AcquireMagickResource(AreaResource,(MagickSizeType)
cache_info->columns*cache_info->rows);
if (cache_info->mode == PersistMode)
status=MagickFalse;
length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
cache_info->metacontent_extent);
if ((status != MagickFalse) &&
(length == (MagickSizeType) ((size_t) length)) &&
((cache_info->type == UndefinedCache) ||
(cache_info->type == MemoryCache)))
{
status=AcquireMagickResource(MemoryResource,cache_info->length);
if (status != MagickFalse)
{
status=MagickTrue;
if (cache_anonymous_memory <= 0)
{
cache_info->mapped=MagickFalse;
cache_info->pixels=(Quantum *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) cache_info->length));
}
else
{
cache_info->mapped=MagickTrue;
cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
cache_info->length);
}
if (cache_info->pixels == (Quantum *) NULL)
{
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
}
else
{
/*
Create memory pixel cache.
*/
cache_info->type=MemoryCache;
cache_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
cache_info->metacontent=(void *) (cache_info->pixels+
cache_info->number_channels*number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->mapped != MagickFalse ?
"Anonymous" : "Heap",type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
cache_info->storage_class=image->storage_class;
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
}
status=AcquireMagickResource(DiskResource,cache_info->length);
hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
exception);
if ((status == MagickFalse) && (hosts != (const char *) NULL))
{
DistributeCacheInfo
*server_info;
/*
Distribute the pixel cache to a remote server.
*/
server_info=AcquireDistributeCacheInfo(exception);
if (server_info != (DistributeCacheInfo *) NULL)
{
status=OpenDistributePixelCache(server_info,image);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
GetDistributeCacheHostname(server_info));
server_info=DestroyDistributeCacheInfo(server_info);
}
else
{
/*
Create a distributed pixel cache.
*/
status=MagickTrue;
cache_info->type=DistributedCache;
cache_info->server_info=server_info;
(void) FormatLocaleString(cache_info->cache_filename,
MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
(DistributeCacheInfo *) cache_info->server_info),
GetDistributeCachePort((DistributeCacheInfo *)
cache_info->server_info));
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
GetDistributeCacheFile((DistributeCacheInfo *)
cache_info->server_info),type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/*
Create pixel cache on disk.
*/
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
(cache_info->mode != PersistMode))
{
(void) ClosePixelCacheOnDisk(cache_info);
*cache_info->cache_filename='\0';
}
if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
{
cache_info->type=UndefinedCache;
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
image->filename);
return(MagickFalse);
}
status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
cache_info->length);
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
ThrowFileException(exception,CacheError,"UnableToExtendCache",
image->filename);
return(MagickFalse);
}
cache_info->type=DiskCache;
length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
cache_info->metacontent_extent);
if (length == (MagickSizeType) ((size_t) length))
{
status=AcquireMagickResource(MapResource,cache_info->length);
if (status != MagickFalse)
{
cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
cache_info->offset,(size_t) cache_info->length);
if (cache_info->pixels == (Quantum *) NULL)
{
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
/*
Create file-backed memory-mapped pixel cache.
*/
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->type=MapCache;
cache_info->mapped=MagickTrue;
cache_info->metacontent=(void *) NULL;
if (cache_info->metacontent_extent != 0)
cache_info->metacontent=(void *) (cache_info->pixels+
cache_info->number_channels*number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
cache_info->file,type,(double) cache_info->columns,
(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
}
status=MagickTrue;
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,"B",
MagickPathExtent,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MagickPathExtent,
"open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,type,(double)
cache_info->columns,(double) cache_info->rows,(double)
cache_info->number_channels,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent pixel
% cache; zero clones the current pixel cache to the persistent store.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  /*
    Attach to, or create, a disk-backed pixel cache that survives program
    exit.  On entry *offset is the byte position of this image's pixels in
    the persistent store; on success it is advanced past them (rounded to a
    page boundary) so the caller can persist the next image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Flush any pending OpenCL results into the host-side pixel buffer. */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: memory-map the named file at
        *offset rather than cloning pixels.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /*
        NOTE(review): this rounds *offset up to the next page boundary, but
        adds a full extra page when length is already page-aligned — confirm
        the writer side uses the identical expression so offsets agree.
      */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache: copy the current pixels into a new
    disk cache at the requested filename/offset.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);  /* not yet opened; ClonePixelCacheRepository opens it */
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* Advance past the persisted pixels, page-aligned (see NOTE above). */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  /* May clone/fault-in the pixel cache; returns NULL on failure. */
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)  /* signed overflow guard on y*columns+x */
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* Advance offset to the region's last pixel; it must stay in bounds. */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.  Write-mode: existing pixel values need not be read
    back; a mask channel forces buffered (indirect) access.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default queue-authentic-pixels handler: delegate to the per-thread cache
    nexus so concurrent callers do not share staging buffers.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable (write-only) pixel region.  An installed
    queue_authentic_pixels_handler takes precedence; otherwise fall back to
    the per-thread cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Read length bytes at offset from the cache file into buffer, retrying
    short reads and EINTR.  Returns the number of bytes actually read
    (< length on EOF or error; -1 if the initial seek fails).
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /*
          A zero return means end-of-file: stop unconditionally.  (read()
          does not set errno at EOF, so testing errno here could see a stale
          EINTR from an earlier call and spin forever.)
        */
        if (count == 0)
          break;
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  /*
    Copy the metacontent of the nexus region out of the pixel cache backing
    store (memory, memory-map, disk file, or distributed server) into the
    nexus staging buffer.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus aliases the authentic cache directly: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;  /* first pixel of the region, in pixels */
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;  /* metacontent bytes per region row */
  extent=length*nexus_info->region.height;  /* total metacontent bytes */
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region is contiguous: copy it in one pass. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Contiguous and small enough for one read. */
          length=extent;
          rows=1UL;
        }
      /* Metacontent is stored after all pixel data in the cache file. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stopped short of rows only if some row transfer above failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Copy the pixels of the nexus region out of the pixel cache backing store
    (memory, memory-map, disk file, or distributed server) into the nexus
    staging buffer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* Overflow guard: y*columns must round-trip exactly. */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);  /* bytes per region row */
  /* Overflow guard on the per-row byte count. */
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;  /* total bytes */
  /* Overflow guard on the total byte count. */
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region is contiguous: copy it in one pass. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Contiguous and small enough for one read. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read; reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stopped short of rows only if some row transfer above failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the cache reference count under its semaphore and return the
    same cache handle.
  */
  /* Fixed: compare against (Cache) NULL, not (Cache *) NULL — the parameter
     is a Cache, and this matches every other null test in this file. */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Refresh the cache's channel count from the image's current pixel
    channels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* Clear the module-wide anonymous-memory policy flag; subsequent nexus
     allocations use aligned heap memory rather than anonymous maps. */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Reset the module-wide pixel-cache epoch counter to its initial state. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: copy each non-NULL handler from cache_methods
    into the cache's method table, leaving the existing handler in place when
    the corresponding slot in cache_methods is NULL.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Fixed: previously this read the CURRENT handler from cache_info->methods
    before the null test, so a NULL slot in cache_methods could overwrite a
    valid installed handler.  Test cache_methods like every branch above.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Reserve staging memory for a cache nexus: an anonymous memory map when
    cache_anonymous_memory is positive, otherwise a zero-filled aligned heap
    allocation.  Returns MagickFalse (with exception set) when length does
    not fit in size_t or the allocation fails.
  */
  if (((MagickSizeType) ((size_t) length)) != length)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /* Anonymous map; mark it so release goes through UnmapBlob. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /* Aligned heap allocation, zero-initialized. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Hint the CPU to pull the next cache line of the nexus pixels: read
    intent for ReadMode, write intent otherwise.  Tiny nexuses are skipped.
    (The intent/locality arguments stay literal constants in each call, as
    prefetch builtins require compile-time constants.)
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Define the region of the cache the nexus covers.  When the region lies
    entirely inside an in-core cache (and buffering is not forced) the nexus
    points directly at the cache pixels; otherwise a private staging buffer
    is (re)acquired and the nexus points at it.  Returns the nexus pixels,
    or NULL on error.
  */
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is only safe for full-width row spans or a single row
        fully inside the cache bounds (the region is then contiguous).
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer too small: replace it */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  /*
    Set the alpha channel of every pixel in the image to 'alpha' and enable
    the image's alpha trait.  Rows are processed in parallel; a failed row
    flags 'status' so remaining iterations become no-ops.
  */
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  /*
    Install a new virtual-pixel method on the pixel cache and return the
    previous one.  Some methods require image preparation: background and
    transparent methods may need an alpha channel, and a non-gray background
    on a gray image forces an sRGB colorspace.
  */
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;  /* remember previous setting */
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* ensure an alpha channel if the background color carries alpha */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Refresh the host copy of an OpenCL-backed memory cache.  No-op unless the
    cache is a MemoryCache with an attached OpenCL cache info.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Complete pending OpenCL operations and update host memory for the
    image's pixel cache.
  */
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Save the authentic pixels held by 'nexus_info' back to the in-memory or
    disk cache.  Returns MagickTrue when the region is synced.  A successful
    sync marks the image as tainted (modified).
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /* apply write/composite masks before the pixels are committed */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* nexus points directly into the cache: nothing to copy back */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Sync the calling thread's authentic pixel nexus back to the pixel cache.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  /*
    Save the image pixels to the in-memory or disk cache, delegating to a
    registered sync handler when one is installed; otherwise sync this
    thread's nexus directly.
  */
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  /*
    Flush the image pixels to the cache by re-fetching the image pixel cache;
    success is a non-NULL cache.
  */
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Write the nexus meta-content to its region of the pixel cache (memory,
    disk, or distributed server).  Returns MagickTrue on success.  When the
    region spans full cache rows and fits the transfer limits, the row loop
    collapses to a single transfer (rows=1, length=extent).
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus writes went straight to the cache */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.  Metacontent is stored after all
        pixel data, hence the extent*number_channels*sizeof(Quantum) skip.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;  /* short write: reported below via y < rows */
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* FIX: was the mojibake '®ion' (HTML entity for '&region') */
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  /*
    Write the nexus pixels to their region of the pixel cache (memory, disk,
    or distributed server).  Returns MagickTrue on success.  When the region
    spans full cache rows and fits the transfer limits, the row loop
    collapses to a single transfer (rows=1, length=extent).
  */
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus writes went straight to the cache */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write: reported below via y < rows */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* FIX: was the mojibake '®ion' (HTML entity for '&region') */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
critical_multiple.c | // PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output
#include <stdio.h>
#include <omp.h>
int main()
{
    /* Compiler-pass test: two separate critical sections updating one shared
       counter.  Each thread adds its thread id, then 1; both updates are
       serialized, so the printed total is deterministic for a fixed thread
       count (the RUN line above executes the transpiled binary under 4 MPI
       ranks). */
    int x = 0;
#pragma omp parallel
    {
#pragma omp critical
        {
            x += omp_get_thread_num();
        }
#pragma omp critical
        {
            x += 1;
        }
    }
    printf("x: %d\n", x);
}
|
pyfr_gemm_rm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <mkl.h>
#include <libxsmm.h>
static double sec(struct timeval start, struct timeval end) {
return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6;
}
int main(int argc, char *argv[])
{
  /*
   * Benchmark row-major DGEMM: MKL dgemm versus a JIT-generated libxsmm
   * kernel applied over nblock-wide column panels.  Arguments: M N K reps.
   * Fixes vs. original: usage string typo ("./a,out"), fabs() used without
   * <math.h> (implicit declaration), end-of-timing gettimeofday placed
   * inside the timed loop, leaked _mm_malloc buffers, missing return.
   */
  int n, m, k;
  int lda, ldb, ldc;
  double* a;
  double* b;
  double* c1;
  double* c2;
  struct timeval l_start, l_end;
  double l_total = 0.0;
  double max_error = 0.0;
  int reps, i, j;
  const int nblock = 16;
  double alpha = 1.0, beta = 1.0;
  char transa = 'N', transb = 'N';
  libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE;
  libxsmm_dmmfunction kernel = NULL;

  if (argc != 5) {
    fprintf(stderr, "Invalid ./a.out M N K reps\n");
    exit(-1);
  }
  m = atoi(argv[1]);
  n = atoi(argv[2]);
  k = atoi(argv[3]);
  reps = atoi(argv[4]);

  /* this is col-major what you want to use for the sizes in question */
  lda = k;
  ldb = n;
  ldc = n;

  if (n % nblock != 0) {
    fprintf(stderr, "N needs to be divisable by %i\n", nblock);
    exit(-1);
  }

  a  = (double*)_mm_malloc(lda*m*sizeof(double), 64);
  b  = (double*)_mm_malloc(ldb*k*sizeof(double), 64);
  c1 = (double*)_mm_malloc(ldc*m*sizeof(double), 64);
  c2 = (double*)_mm_malloc(ldc*m*sizeof(double), 64);

#pragma omp parallel for
  for (i = 0; i < lda*m; i++) {
    a[i] = libxsmm_rng_f64();
  }
#pragma omp parallel for
  for (i = 0; i < ldb*k; i++) {
    b[i] = libxsmm_rng_f64();
  }
#pragma omp parallel for
  for (i = 0; i < ldc*m; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  /* JIT Kernel */
  kernel = libxsmm_dmmdispatch(nblock, m, k, &ldb, &lda, &ldc, NULL, NULL, NULL, &l_prefetch_op );
  if (kernel == 0) {
    printf("JIT failed, exiting\n");
    exit(-1);
  }

  /* warm up MKL (and JIT internal state), then reset the accumulators */
  dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc);
#pragma omp parallel for
  for (i = 0; i < ldc*m; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  gettimeofday(&l_start, NULL);
  for ( j = 0; j < reps; j++ ) {
    dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc);
  }
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] MKL     (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS  MKL     (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s    MKL     (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  gettimeofday(&l_start, NULL);
  for ( j = 0; j < reps; j++ ) {
#pragma omp parallel for private(i)
    for ( i = 0; i < n; i+=nblock) {
      kernel( b+i, a, c2+i, NULL, NULL, NULL );
    }
  }
  /* was inside the reps loop: end timestamp belongs after all reps finish */
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS  libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s    libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  /* test result: max |c1 - c2|; computed without fabs() since <math.h> is
     not included by this file */
  for ( i = 0; i < ldc*m; i++) {
    double diff = c1[i] - c2[i];
    if (diff < 0.0) {
      diff = -diff;
    }
    if (max_error < diff) {
      max_error = diff;
    }
  }
  printf("max error: %f\n\n", max_error);

  _mm_free(a);
  _mm_free(b);
  _mm_free(c1);
  _mm_free(c2);
  return 0;
}
|
orderedConstruct.c | int main() {
int x = 10;
#pragma omp parallel
{
int localX, localY;
#pragma omp for
for(localX = 0; local < 10; localX++) {
#pragma omp ordered
{
localY = x;
}
}
localX = 20;
}
x = 30;
}
|
GB_unop__identity_fp32_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp32_uint8
// op(A') function: GB_unop_tran__identity_fp32_uint8
// C type: float
// A type: uint8_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_fp32_uint8
(
    float *Cx,              // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply the identity op with typecast: Cx [p] = (float) Ax [p],
    // over all anz entries (dense) or just the set bitmap entries.
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every entry 0..anz-1 is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_fp32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Shared transpose template: the included file instantiates the
    // transpose using the GB_CAST_OP macro defined above for this type pair.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_uint64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_uint16
// op(A') function: GB_tran__lnot_uint64_uint16
// C type: uint64_t
// A type: uint16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_uint16
(
    uint64_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (uint64_t) !(Ax [p] != 0), for all anz entries
    // (GB_CAST_OP expands the cast and the logical-not defined above).
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply LNOT.  The actual loop
// lives in the shared template GB_unaryop_transpose.c, which expands the
// GB_* macros defined at the top of this (auto-generated) file; the code here
// must therefore stay in step with those macro definitions.
GrB_Info GB_tran__lnot_uint64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time via GB_control.h; caller falls back
    // to the generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_uint16_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_int64)
// op(A') function: GB (_unop_tran__identity_uint16_int64)
// C type: uint16_t
// A type: int64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint16_int64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // full/sparse/hyper: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            // Cx [k] = (uint16_t) Ax [k], the identity op with typecast
            Cx [k] = (uint16_t) Ax [k] ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            Cx [k] = (uint16_t) Ax [k] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int64 -> uint16, and apply the
// identity operator.  The loop body comes from the shared template
// GB_unop_transpose.c, which expands the GB_* macros defined at the top of
// this (auto-generated) file.
GrB_Info GB (_unop_tran__identity_uint16_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time via GB_control.h; caller falls back
    // to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
SourceCode.c | #ifdef _WIN32
#include <stdio.h>
#else
#define _GNU_SOURCE // for fcloseall() on linux
#include <stdio.h>
#endif
#include <stdbool.h>
#include <string.h>
#include <omp.h>
#include <math.h>
#include <SpiceUsr.h>
#include <ini.h>
#include <PIConfig.h>
#ifdef _WIN32
#include <windows.h> // only needed for Sleep()
#include <direct.h> // only needed for _mkdir()
#else
#include <unistd.h> // only needed for usleep()
#include <sys/stat.h> // only needed for mkdir()
#endif
// Activate timing: Add preprocessor definition "__WTIMING"
#ifdef __WTIMING
#include <time.h>
#endif
// Check for leaked memory at the end of the run (MSVC only): Add preprocessor definition "__CHKMEMLEAK"
#ifdef __CHKMEMLEAK
#define _CRTDBG_MAP_ALLOC
#include <stdlib.h>
#include <crtdbg.h>
#else // Always include
#include <stdlib.h>
#endif // __CHKMEMLEAK
//BEGIN Function cross-platform compatibility
#ifdef _WIN32
#define SLEEP( a1 ) Sleep( a1 )
#define mkdir( a1, a2 ) _mkdir( a1 )
#else
#define SLEEP( a1 ) usleep( a1 * 1000 )
#endif
// Avoid MSVC level 3 warning C4996
#ifdef _WIN32
#define strdup _strdup
#define strcpy strcpy_s
#define sscanf sscanf_s
#define strtok_r strtok_s
#define fcloseall _fcloseall
#else
#define strcpy( a1, a2, a3 ) strcpy( a1, a3 )
#define fopen_s( a1, a2, a3 ) *a1 = fopen( a2, a3 )
#define sprintf_s snprintf
#endif
#ifdef _WIN32 // platform-specific separators in paths
#define OS_SEP "\\"
#else
#define OS_SEP "/"
#endif
//END Function cross-platform compatibility
// Custom header files
#include <PI_types.h> // configuration_values, configuration_readout
#include <IntegEnv.h> // get_body_state(), calc_accel(), calc_save_factor(), printpdata(), calc_pInfo(), interp_body_states_malloc(), interp_body_states_free(), interp_body_states(), precompute_dtime_powers()
#include <RungeKutta4.h>
#include <RungeKutta76.h>
bool particle_in_file(int p, char already_done_path[]);
bool particle_incomplete(char outputpath[], SpiceDouble *nstate);
int read_configuration(configuration_values *config_data);
int convert_results_into_binary(configuration_values *config_data, int particles_count, double *multiplication_factor, char already_done_path[], char encounter_path[]);
void printinfo();
//Main Program
// Entry point: loads SPICE kernels and the configuration, reads the particle
// start states from the input file, then integrates every particle (RK4 or
// RK76), in parallel when NUMBER_OF_THREADS > 1.  Progress is persisted in
// progress.txt and processed_particles.txt so an interrupted run can resume.
// Returns 0 on success, 1 on setup failure, 2 on I/O failure or skipped
// particles.
int main(int argc, char *argv[])
{
    // Version and build info output
    printinfo();
    //Create some variables
    int j, k, e, p, g, c, error_code = 0, particles_count = 0, particles_done = 0, nCommentLines = 0;
    char temp[260], *next_token = NULL, already_done_path[260] = "INPUT" OS_SEP "processed_particles.txt", encounter_path[260] = "INPUT" OS_SEP "encounter_particles.txt";
    bool commentLine = false;
    // Check for command-line argument -t: save runtime.txt file, for example for MATLAB processing
    // NOTE(review): runtimefn is never fclose'd; it is flushed only at process exit.
    FILE* runtimefn;
    bool printtimefile = 0;
    if (argc >= 2)
    {
        for (j = 0; j < argc; j++)
        {
            if (j)
            {
                printf("\nInput argument set: ");
                for (k = 0; k < (int)strlen(argv[j]); k++)
                {
                    printf("%c", argv[j][k]);
                }
            }
            if (strcmp(argv[j], "-t") == 0)
            {
                printf("\n Will save runtime.txt.");
                fopen_s(&runtimefn, "runtime.txt", "w");
                printtimefile = 1;
                break;
            }
        }
    }
    // Initialize config_data
    configuration_values config_data =
    {
        .algorithm = 0,
        .outputpath = "OUTPUT" OS_SEP,
        .number_of_threads = 0,
        .final_time = 0.,
        .start_time_save = 0.,
        .dv_step = 0.,
        .e_target = 0.,
        .first_particle_number = 0,
        .particle_mass = 0.,
        .particle_density = 0.,
        .particle_radius = 0.,
        .save_as_binary = 0
    };
    // Allocate memory below body char pointers
    for (j = 0; j < 10; j++)
    {
        config_data.body_char[j] = malloc(3 * sizeof(ConstSpiceChar));
    }
    //Load Spice kernels
    printf("\nLoading kernels... ");
    furnsh_c("kernels_generic.txt");
    printf("...done.");
    // Read configuration file
    printf("\nLoading configuration... ");
    if (read_configuration(&config_data) != 0)
    {
        printf("\n\nerror: could not read configuration.\n");
        return 1;
    }
    printf("...done.");
    printf("\nProcessing configuration... ");
    if (calc_pInfo(&config_data) != 0)
    {
        printf("\n\nerror: could not process configuration values.\n");
        return 1;
    }
    printf("...done.");
    //LOAD PARTICLES
    printf("\nLoading particles... ");
    FILE *particles_start_file;
    SpiceDouble **particles_start;
    fopen_s(&particles_start_file, config_data.inputfpath, "r");
    if (particles_start_file == NULL)
    {
        printf("\n\nerror: could not load particles.\n");
        //SLEEP(4000);
        return 1;
    }
    // First pass: count data lines; a '%' anywhere on a line marks it as a
    // comment line.
    j = 0;
    while ((c = fgetc(particles_start_file)) != EOF) // requires newline before eof
    {
        if (c == '%')
        {
            commentLine = true;
        }
        else if (c == '\n')
        {
            if (commentLine == false)
            {
                particles_count++;
            }
            else
            {
                commentLine = false;
                nCommentLines++;
            }
        }
    }
    particles_start = malloc((particles_count + 1) * sizeof(SpiceDouble *));
    for (j = 0; j < particles_count; j++)
    {
        particles_start[j] = malloc(8 * sizeof(SpiceDouble));
    }
    fclose(particles_start_file);
    // Second pass: parse 8 tab-separated doubles per particle
    // (state x,y,z,vx,vy,vz in [0..5], multiplication factor [6], start time [7]).
    // NOTE(review): j = -nCommentLines skips the FIRST nCommentLines lines, so
    // this assumes all comment lines precede the data — verify input format.
    fopen_s(&particles_start_file, config_data.inputfpath, "r");
    j = -nCommentLines;
    while (fgets(temp, sizeof(temp), particles_start_file) != NULL)
    {
        if (j >= 0) // Warning: sscanf functions will cause a crash if parameters are missing in the input file
        {
            char* cval = strtok_r(temp, "\t", &next_token);
            for (g = 0; g < 6; g++)
            {
                sscanf(cval, "%lf", &particles_start[j][g]);
                cval = strtok_r(NULL, "\t", &next_token);
            }
            sscanf(cval, "%lf", &particles_start[j][6]);
            cval = strtok_r(NULL, "\n", &next_token);
            sscanf(cval, "%lf", &particles_start[j][7]);
        }
        j++;
    }
    int last_particle_number = config_data.first_particle_number + particles_count - 1;
    fclose(particles_start_file);
    printf("...done. %d particles loaded.\n", particles_count);
    //Print config
    if (config_data.algorithm == 1)
        printf("\n algorithm = RK4");
    else if (config_data.algorithm == 2)
    {
        printf("\n algorithm = RK76");
        printf("\n interpolation order = %d", config_data.interp_order);
    }
    else
        printf("\n algorithm unknown.");
    if (config_data.number_of_threads > 1)
        printf("\n number of threads = %d", config_data.number_of_threads);
    printf("\n final_time = %.16le", config_data.final_time);
    if (config_data.start_time_save > (double)-3.155e+10)
        printf("\n start_time_save = %.6le", config_data.start_time_save);
    if (config_data.save_as_binary){
        printf("\n saving output as binary (.ctwu)");
    }
    else {
        printf("\n saving output as text (.txt)");
    }
    printf("\n e_save_slope = %d", config_data.e_save_slope);
    printf("\n e_save_max = %d", config_data.e_save_max);
    if (config_data.endontime){
        printf("\n end on time yes");
    }
    else {
        printf("\n end on time no");
    }
    printf("\n bodys_ID =");
    for (j = 0; j < config_data.N_bodys; j++)
        printf(" %d", config_data.body_int[j]);
    if (config_data.ssb_centered == 1)
        printf("\n ssb-centered = %d", config_data.ssb_centered);
    if (config_data.algorithm == 1)
        printf("\n dv_step = %le", config_data.dv_step);
    else if (config_data.algorithm == 2)
        printf("\n e_target = %le", config_data.e_target);
    if (config_data.particle_mass > 0.)
    {
        printf("\n particle_mass = %le", config_data.particle_mass);
        printf("\n particle_density = %le", config_data.particle_density);
        printf("\n particle_radius = %.12le", config_data.particle_radius);
        printf("\n beta = %.12le", config_data.beta);
    }
    for (j = 0; j < config_data.N_bodys; j++)
    {
        if (config_data.body_int[j] == 10)
            printf("\n beta-corr. solar GM = %.12le", config_data.GM[j] * (1. - config_data.beta));
    }
    if (config_data.first_particle_number != 1)
        printf("\n first_particle_number = %d", config_data.first_particle_number);
    printf("\n save_nth = %d", config_data.n);
    if (config_data.only_encounters)
    {
        printf("\n encounter_rad = %.12le", config_data.encounter_rad);
        printf("\n encounter_int = %d", config_data.encounter_body_int);
    }
    //Check for progress.txt; create it with "0.0" if it does not exist yet
    FILE *progress, *already_done;
    fopen_s(&progress, "progress.txt", "r+");
    if (progress == NULL)
    {
        for (e = 0; e < 3; e++)
        {
            fopen_s(&progress, "progress.txt", "w");
            if (progress == NULL) // If opening file failed, wait 100 ms and try again.
            {
                perror("The following error occurred");
                SLEEP(100);
                if (e == 2) // After 3 failed attempts, abort.
                {
                    printf("\n\nerror: could not create progress.txt");
                    return 2;
                }
            }
            else
            {
                fprintf(progress, "0.0");
                break;
            }
        }
    }
    fclose(progress);
    //Check for processed_particles.txt and count particles already processed
    fopen_s(&already_done, already_done_path, "r+");
    if (already_done == NULL)
    {
        printf("\n\n First run. Creating processed_particles.txt");
        for (e = 0; e < 3; e++)
        {
            fopen_s(&already_done, already_done_path, "w");
            if (already_done == NULL)
            {
                perror("The following error occurred");
                SLEEP(100);
                if (e == 2)
                {
                    printf("\n\nerror: could not create processed_particles.txt");
                    return 2;
                }
            }
            else
            {
                break;
            }
        }
    }
    else
    {
        //Count the number of particles already done
        while ((c = fgetc(already_done)) != EOF)
        {
            if (c == '\n')
            {
                particles_done++;
            }
        }
    }
    fclose(already_done);
#ifdef __WTIMING
    clock_t start = clock(); // Start clock
#endif
    //Start (parallel) computing
    printf("\n\n Numerical approximation started...");
    /*--------------------------------------------------------------------------------------------------------------------------*/
#pragma omp parallel private(j, e) num_threads(config_data.number_of_threads)
    {
        int th_id = omp_get_thread_num();
        //Allocate nstate (per-thread current particle state: x,y,z,vx,vy,vz,t)
        SpiceDouble nstate[7];
        //Loop over particles; the omp-for loop variable p is implicitly private
#pragma omp for
        for (p = config_data.first_particle_number; p <= last_particle_number; p++)
        {
            int err = 0;
            //Check if particle has already been processed completely
            if (particle_in_file(p, already_done_path))
            {
                printf("\n particle #%d has already been processed", p);
                continue;
            }
            //Set path where to save the particle
            char particle_path[260] = "";
            sprintf_s(particle_path, 260, "%s_#%d%s", config_data.outputpath, p, ".txt");
            //Set particle start_time
            SpiceDouble start_time = particles_start[p - config_data.first_particle_number][7];
            //Check if particle has been processed but is incomplete; if so,
            //particle_incomplete() fills nstate with the last complete state
            if (particle_incomplete(particle_path, nstate))
            {
                printf("\n particle #%d will be continued", p);
            }
            else
            {
                //Set initial nstate
                for (j = 0; j < 6; j++)
                    nstate[j] = particles_start[p - config_data.first_particle_number][j];
                nstate[6] = start_time;
                //Create initial file (retry up to 3 times, 100 ms apart)
                FILE* init;
                for (e = 0; e < 3; e++)
                {
                    fopen_s(&init, particle_path, "w");
                    if (init == NULL)
                    {
                        SLEEP(100);
                        if (e == 2)
                        {
                            err = 1;
                            printf("\n\nerror: could not create initial output file");
                            break;
                        }
                    }
                    else
                    {
                        if (printpdata(init, nstate))
                        {
                            err = 1;
                            printf("\n\nerror: initial output is NAN.");
                            break;
                        }
                        fclose(init);
                        break;
                    }
                }
            }
            //Create File for output
            FILE *statefile;
            fopen_s(&statefile, particle_path, "a");
            if (statefile == NULL)
            {
                printf("\n\nerror: could not write to output file");
                err = 1;
            }
            //Integrate particle
            if (err == 0)
            {
                switch (config_data.algorithm)
                {
                case 1:
                    err = RungeKutta4(&config_data, nstate, statefile);
                    break;
                case 2:
                    err = RungeKutta76(&config_data, nstate, statefile);
                    break;
                default:
                    err = 1;
                    printf("\n\nerror: unknown integration algorithm: %d", config_data.algorithm);
                }
                fclose(statefile);
            }
            //Write the particle number to the already-done file and update progress.txt
            FILE* done;
            double fraction;
#pragma omp critical(ALREADYDONE)
            {
                particles_done++;
                for (e = 0; e < 3; e++)
                {
                    fopen_s(&done, already_done_path, "a+");
                    if (done == NULL)
                    {
                        SLEEP(100);
                        if (e == 2)
                        {
                            err = 1;
                            printf("\n\nerror: could not write to processed_particles.txt");
                            break;
                        }
                    }
                    else
                    {
                        fprintf(done, "%d\n", p);
                        fclose(done);
                        break;
                    }
                }
            } // END omp critical(ALREADYDONE)
            //Write the particle number to the encounter-list file if it encountered the body
            if (config_data.only_encounters)
            {
                FILE* enc;
                // NOTE(review): config_data is shared between threads, so
                // reading/clearing .encounter here races with other particles'
                // integrations — confirm intended semantics.
                if (config_data.encounter)
                {
#pragma omp critical(ENCOUNTERF)
                    {
                        // NOTE(review): particles_done was already incremented in
                        // critical(ALREADYDONE) above; this second increment makes
                        // the progress fraction overshoot — looks like a copy-paste
                        // slip, confirm before relying on progress.txt.
                        particles_done++;
                        for (e = 0; e < 3; e++)
                        {
                            fopen_s(&enc, encounter_path, "a+");
                            if (enc == NULL)
                            {
                                SLEEP(100);
                                if (e == 2)
                                {
                                    err = 1;
                                    printf("\n\nerror: could not write to encounter_particles.txt");
                                    break;
                                }
                            }
                            else
                            {
                                fprintf(enc, "%d\n", p);
                                fclose(enc);
                                break;
                            }
                        }
                    } // END omp critical(ENCOUNTERF)
                    config_data.encounter = 0;
                }
            }
#pragma omp critical(PROCESS)
            {
                for (e = 0; e < 3; e++)
                {
                    fopen_s(&progress, "progress.txt", "w");
                    if (progress == NULL)
                    {
                        SLEEP(100);
                        if (e == 2)
                        {
                            //err = 1;
                            printf("\n\nwarning: could not write to progress.txt (non-relevant)");
                            break;
                        }
                    }
                    else
                    {
                        fraction = (float)particles_done / particles_count;
                        fprintf(progress, "%f", fraction);
                        fclose(progress);
                        break;
                    }
                }
            } // END omp critical(PROCESS)
            // NOTE(review): error_code is shared and updated outside any
            // critical section — concurrent failing particles can race here.
            if (err != 0)
            {
                error_code += err;
                printf("\n particle #%d was not successfully completed", p);
            }
            else
            {
                printf("\n particle #%d done on thread %d", p, th_id);
            }
        }
#ifdef __WTIMING
        //Print elapsed time
#pragma omp barrier
#pragma omp master
        {
            remove(already_done_path);
            //Print time
            clock_t end = clock();
            double elapsed_time = (end - start) / (double)CLOCKS_PER_SEC;
            printf("\n\n Elapsed time: %1.3f s", elapsed_time);
            if (printtimefile == 1)
            {
                if (runtimefn == NULL)
                {
                    printf("\n\nwarning: could not write to runtime.txt (non-relevant)");
                }
                else
                {
                    fprintf(runtimefn, "%.8le", elapsed_time);
                }
            }
        }
#endif // __WTIMING
    }
    //Convert .txt output into binary
    if (config_data.save_as_binary == 1)
    {
        double *multiplication_factor;
        multiplication_factor = malloc(particles_count * sizeof(double));
        if (multiplication_factor == NULL)
        {
            printf("\n\nerror: could not allocate multiplication_factor array (OOM)");
            return 2;
        }
        for (j = 0; j < particles_count; j++)
        {
            multiplication_factor[j] = particles_start[j][6];
        }
        if (convert_results_into_binary(&config_data, particles_count, multiplication_factor, already_done_path, encounter_path) != 0)
        {
            printf("\n\nerror: could not convert to binary");
            return 2;
        }
    }
    //Deallocate arrays
    for (j = 0; j < particles_count; j++)
        free(particles_start[j]);
    free(particles_start);
    if (error_code == 0)
    {
        printf("\n\nAll particles are done. Everything is OK!\n");
        return 0;
    }
    else
    {
        printf("\n\nWarning: %d particles may have been skipped!\n", error_code);
        return 2;
    }
    // NOTE(review): unreachable — both branches above return, so the leak
    // check below never runs.
#ifdef __CHKMEMLEAK
    _CrtDumpMemoryLeaks();
#endif
}
//Functions
// Returns true if particle number p is listed (one integer per line) in the
// file at path.  Serialized with the ALREADYDONE critical section because the
// same file is appended to from the integration loop.
bool particle_in_file(int p, char path[])
{
    FILE* listfile;
    char line[16] = "";
    int id;
    bool found = false;
#pragma omp critical(ALREADYDONE)
    {
        fopen_s(&listfile, path, "r");
        if (listfile == NULL)
        {
            printf("\n\nwarning: could not access processed_particles.txt");
        }
        else
        {
            // scan every line; the file is small, so no early exit needed
            while (fgets(line, 16, listfile) != NULL)
            {
                sscanf(line, "%d", &id);
                if (id == p)
                {
                    found = true;
                }
            }
            fclose(listfile);
        }
    }
    return found;
}
// Returns true if an output file for this particle already exists and holds at
// least two lines; in that case nstate[0..6] is loaded from the second-to-last
// line (the last line may be truncated by an aborted run) and the file is
// rewritten without that last line.  Returns false if no file exists, the file
// holds fewer than two lines, or any I/O step fails (particle restarts).
//
// BUGFIX: the original closed `check` inside the critical section and then
// unconditionally fclose'd it again afterwards — a double fclose (undefined
// behavior, CERT FIO46-C) — and could even fclose(NULL) when reopening the
// output file for writing failed.  Each stream is now closed exactly once.
bool particle_incomplete(char particle_path[], SpiceDouble *nstate)
{
    int err = 0;
    FILE *check, *tempfile;
    char temp1[260], temp2[260], temp3[260], *next_token = NULL;
    bool answer = false;
    fopen_s(&check, particle_path, "r");
    if (check == NULL)
    {
        // no previous output file: particle has not been started yet
        return false;
    }
#pragma omp critical(TEMP)
    {
        fopen_s(&tempfile, "temp.txt", "w");
        if (tempfile == NULL)
        {
            printf("\n\nerror: could not create tempfile; particle restarted");
            err = 1;
            fclose(check);
        }
        else
        {
            // Copy all but the last line into temp.txt while keeping the two
            // newest lines in temp2 (last) and temp3 (second-to-last).
            int c = 0;
            while (fgets(temp1, sizeof(temp1), check) != NULL)
            {
                if (c > 0)
                {
                    strcpy(temp3, 260, temp2);
                }
                strcpy(temp2, 260, temp1);
                if (c > 0)
                {
                    fprintf(tempfile, "%s", temp3);
                }
                c++;
            }
            fclose(tempfile);
            fclose(check); // closed exactly once on this path
            if (c > 1)
            {
                // Parse the last complete state (temp3) into nstate;
                // strtok_r may mutate temp3, which was already written out.
                char* cval = strtok_r(temp3, "\t", &next_token);
                sscanf(cval, "%lf", &(nstate)[0]);
                cval = strtok_r(NULL, "\t", &next_token);
                sscanf(cval, "%lf", &(nstate)[1]);
                cval = strtok_r(NULL, "\t", &next_token);
                sscanf(cval, "%lf", &(nstate)[2]);
                cval = strtok_r(NULL, "\t", &next_token);
                sscanf(cval, "%lf", &(nstate)[3]);
                cval = strtok_r(NULL, "\t", &next_token);
                sscanf(cval, "%lf", &(nstate)[4]);
                cval = strtok_r(NULL, "\t", &next_token);
                sscanf(cval, "%lf", &(nstate)[5]);
                cval = strtok_r(NULL, "\n", &next_token);
                sscanf(cval, "%lf", &(nstate)[6]);
                answer = true;
                //Write the output file again without the last line
                fopen_s(&tempfile, "temp.txt", "r");
                if (tempfile == NULL)
                {
                    printf("\n\nerror: could not create tempfile; particle restarted");
                    err = 1;
                }
                else
                {
                    fopen_s(&check, particle_path, "w");
                    if (check == NULL)
                    {
                        printf("\n\nerror: could not read incomplete outputfile; particle restarted");
                        err = 1; // do NOT fclose(check) here: it is NULL
                    }
                    else
                    {
                        while (fgets(temp1, sizeof(temp1), tempfile) != NULL)
                        {
                            fprintf(check, "%s", temp1);
                        }
                        fclose(check);
                    }
                    fclose(tempfile);
                }
            }
        }
        remove("temp.txt");
    }
    if (err != 0)
    {
        return false;
    }
    return answer;
}
// ini_parse() callback: copies each recognized [section] NAME=value pair into
// the configuration_readout struct.  String-valued settings replace their
// pre-allocated defaults (free the old buffer, strdup the new value); integer
// settings are converted with atoi().  Returns 1 when the pair was consumed,
// 0 for an unknown section/name (ini_parse records this as a parse error,
// but the caller only checks for file-open failure, so unknown keys are
// effectively skipped).
static int handler(void* user, const char* section, const char* name, const char* value)
{
    configuration_readout* pconfig = (configuration_readout*)user;
#define MATCH(s, n) strcmp(section, s) == 0 && strcmp(name, n) == 0
    if (MATCH("simulation", "ALGORITHM")) {
        free(pconfig->algo);
        pconfig->algo = strdup(value);
    }
    else if (MATCH("simulation", "SSB_CENTERED")) {
        pconfig->ssbc = atoi(value);
    }
    else if (MATCH("simulation", "FINAL_TIME")) {
        free(pconfig->finaltime);
        pconfig->finaltime = strdup(value);
    }
    else if (MATCH("simulation", "START_TIME_SAVE")) { // backward compatibility, now in [saving]
        free(pconfig->starttimes);
        pconfig->starttimes = strdup(value);
    }
    else if (MATCH("simulation", "N_BODYS")) {
        pconfig->nbodys = atoi(value);
    }
    else if (MATCH("simulation", "BODYS_ID")) {
        free(pconfig->bodysid);
        pconfig->bodysid = strdup(value);
    }
    else if (MATCH("simulation", "ENDONTIME")) {
        pconfig->endontime = atoi(value);
    }
    else if (MATCH("rk4", "DV_STEP")) {
        free(pconfig->dvstep);
        pconfig->dvstep = strdup(value);
    }
    else if (MATCH("rk76", "E_TARGET")) {
        free(pconfig->etarget);
        pconfig->etarget = strdup(value);
    }
    else if (MATCH("rk76", "IORDER")) {
        pconfig->iorder = atoi(value);
    }
    else if (MATCH("simulation", "SAVE_NTH_MULTIPLIER")) { // backward compatibility, now in [saving]
        free(pconfig->mult);
        pconfig->mult = strdup(value);
    }
    else if (MATCH("simulation", "NUMBER_OF_THREADS")) {
        pconfig->nthreads = atoi(value);
    }
    else if (MATCH("simulation", "SAVE_AS_BINARY")) { // backward compatibility, now in [saving]
        pconfig->savebin = atoi(value);
    }
    else if (MATCH("particles", "PARTICLE_INPUT_FILE_NAME")) {
        free(pconfig->inputfn);
        pconfig->inputfn = strdup(value);
    }
    else if (MATCH("particles", "PARTICLE_OUTPUT_FILE_NAME")) {
        free(pconfig->outputfn);
        pconfig->outputfn = strdup(value);
    }
    else if (MATCH("particles", "PARTICLE_MASS")) {
        free(pconfig->pmass);
        pconfig->pmass = strdup(value);
    }
    else if (MATCH("particles", "PARTICLE_DENSITY")) {
        free(pconfig->pdensity);
        pconfig->pdensity = strdup(value);
    }
    else if (MATCH("particles", "Q_PR")) {
        free(pconfig->q_pr);
        pconfig->q_pr = strdup(value);
    }
    else if (MATCH("particles", "FIRST_PARTICLE_NUMBER")) {
        pconfig->fpnum = atoi(value);
    }
    else if (MATCH("saving", "SAVE_NTH_MULTIPLIER")) {
        free(pconfig->mult);
        pconfig->mult = strdup(value);
    }
    else if (MATCH("saving", "SAVE_AS_BINARY")) {
        pconfig->savebin = atoi(value);
    }
    else if (MATCH("saving", "ENCOUNTER_SLOPE")) {
        pconfig->e_slope = atoi(value);
    }
    else if (MATCH("saving", "ENCOUNTER_MAX")) {
        pconfig->e_max = atoi(value);
    }
    else if (MATCH("saving", "START_TIME_SAVE")) {
        free(pconfig->starttimes);
        pconfig->starttimes = strdup(value);
    }
    else if (MATCH("encounter", "ONLY_ENCOUNTERS")) {
        pconfig->enc_only = atoi(value);
    }
    else if (MATCH("encounter", "ENCOUNTER_INT")) {
        pconfig->enc_int = atoi(value);
    }
    else if (MATCH("encounter", "ENCOUNTER_RAD")) {
        free(pconfig->enc_rad);
        pconfig->enc_rad = strdup(value);
    }
    else {
#if defined(RELTYPERWDI) || defined(RELTYPEDEB)
        printf("\n\nwarning: Unknown configuration setting.");
#endif // RELTYPERWDI || RELTYPEDEB
        return 0; /* unknown section/name, error */
    }
    return 1;
}
// Reads INPUT/configuration.ini (via inih) into a temporary
// configuration_readout struct of strings/ints, then converts and validates
// everything into *config_data (SPICE time strings -> ephemeris time, body
// IDs -> GM values, save-rate multiplier -> every-nth-state counter, paths).
// Returns 0 on success, 1 on invalid/missing settings or OOM, 2 if the ini
// file cannot be loaded.
// NOTE(review): the early `return 1` paths below leak the strdup'd/malloc'd
// config strings (freed only at the end) — harmless for a program that exits
// right after, but worth confirming.
int read_configuration(configuration_values *config_data)
{
    char temp[260] = "", *token, *next_token = NULL, inputpath[260] = ("INPUT" OS_SEP), configpath[260] = "";
    SpiceInt dim, j;
    SpiceDouble mult = 0.0;
    sprintf_s(configpath, 260, "%s%s", inputpath, "configuration.ini");
    // Set default values: initialize config struct
    configuration_readout config =
    {
        /* [particles] */
        .inputfn = (char *)malloc(261), // not C90 compatible
        .outputfn = (char *)malloc(261),
        .pmass = (char *)malloc(31),
        .q_pr = (char *)malloc(31),
        .pdensity = (char *)malloc(31),
        .fpnum = 1,
        /* [simulation] */
        .algo = (char *)malloc(11),
        .ssbc = 0,
        .finaltime = (char *)malloc(101),
        .starttimes = (char *)malloc(101),
        .nbodys = 0,
        .bodysid = (char *)malloc(41),
        .nthreads = 1,
        .endontime = 0,
        /* [saving] */
        .mult = (char *)malloc(31),
        .savebin = 1,
        .e_slope = 4,
        .e_max = 40,
        /* [encounter] */
        .enc_only = 0,
        .enc_int = 3,
        .enc_rad = (char *)malloc(31),
        /* Algorithm-specific */
        /* [rk4] */
        .dvstep = (char *)malloc(31),
        /* [rk76] */
        .etarget = (char *)malloc(31),
        .iorder = 5
    };
    if (config.algo == NULL
        || config.finaltime == NULL
        || config.starttimes == NULL
        || config.bodysid == NULL
        || config.mult == NULL
        || config.inputfn == NULL
        || config.outputfn == NULL
        || config.pmass == NULL
        || config.q_pr == NULL
        || config.pdensity == NULL
        || config.enc_rad == NULL
        || config.dvstep == NULL
        || config.etarget == NULL) // At least one alloc ran OOM
    {
        printf("\n\nerror: could not allocate memory for config char*s (OOM)");
        return 1;
    }
    // set default values for char*s
    strcpy(config.algo, 10, "RK76");
    strcpy(config.finaltime, 100, "");
    strcpy(config.starttimes, 100, "1 JAN 1000");
    strcpy(config.bodysid, 40, "10");
    strcpy(config.mult, 30, "20.");
    strcpy(config.inputfn, 260, "");
    strcpy(config.outputfn, 260, "default");
    strcpy(config.pmass, 30, "0.");
    strcpy(config.q_pr, 30, "1.");
    strcpy(config.pdensity, 30, "1000.");
    strcpy(config.enc_rad, 30, "1.0e7");
    strcpy(config.dvstep, 30, "10e-3");
    strcpy(config.etarget, 30, "10e-18");
    // Parse configuration file (< 0 means the file could not be opened;
    // per-line parse errors from handler() are NOT checked here)
    if (ini_parse(configpath, handler, &config) < 0) {
        printf("Can't load 'configuration.ini'\n");
        return 2;
    }
    //Set algorithm
    // NOTE(review): this compares the pointer to 0 (NULL); it can never
    // trigger after the OOM check and strcpy above — likely meant to test
    // for an empty string.
    if (config.algo == 0)
    {
        printf("\n\nerror: ALGORITHM not set");
        SLEEP(1000);
        return 1;
    }
    if (strcmp(config.algo, "RK4") == 0)
    {
        config_data->algorithm = 1;
    }
    else if (strcmp(config.algo, "RK76") == 0)
    {
        config_data->algorithm = 2;
    }
    else
    {
        config_data->algorithm = 0; // invalid input
    }
    //Center bodies at SSB?
    config_data->ssb_centered = (bool)config.ssbc;
    //Set number of threads
    config_data->number_of_threads = config.nthreads;
    //Save output as binary?
    config_data->save_as_binary = (bool)config.savebin;
    //Saving increase slope
    config_data->e_save_slope = config.e_slope;
    //Maximum increase in save rate
    config_data->e_save_max = config.e_max;
    //End on time?
    config_data->endontime = (bool)config.endontime;
    //Set final date of the simulation
    if (strcmp(config.finaltime, "") == 0)
    {
        printf("\n\nerror: FINAL_TIME not set");
        SLEEP(1000);
        return 1;
    }
    str2et_c(config.finaltime, &config_data->final_time);
    //Set start date for saving
    if ((strcmp(config.starttimes, "0") == 0) || (strcmp(config.starttimes, "") == 0))
    {
        config_data->start_time_save = -1e+11; // set start time to a long time ago.
    }
    else
    {
        str2et_c(config.starttimes, &config_data->start_time_save);
    }
    //Set bodies
    if (config.nbodys == 0)
    {
        printf("\n\nerror: N_BODYS not set");
        SLEEP(1000);
        return 1;
    }
    config_data->N_bodys = config.nbodys;
    // Tokenize the space-separated body ID list and look up each body's GM
    strcpy(temp, sizeof(temp), config.bodysid);
    token = strtok_r(temp, " ", &next_token);
    for (j = 0; j < config_data->N_bodys; j++)
    {
        if (token == NULL)
        {
            printf("\n\nerror: BODYS_ID not set or not enough arguments");
            SLEEP(1000);
            return 1;
        }
        sscanf(token, "%d", &config_data->body_int[j]);
        token = strtok_r(NULL, " ", &next_token);
        sprintf_s((char *)config_data->body_char[j], 3, "%d", config_data->body_int[j]);
        bodvcd_c(config_data->body_int[j], "GM", config_data->N_bodys, &dim, &config_data->GM[j]); // Get standard gravitational parameter of each body (GM)
    }
    //Set step size control (rk4)
    sscanf(config.dvstep, "%lf", &config_data->dv_step);
    //Set target error per step
    sscanf(config.etarget, "%lf", &config_data->e_target);
    //Set which nth state is saved to disc
    sscanf(config.mult, "%lf", &mult);
    if (mult < 0.00000000001)
    {
        //Save every 10nth state. This produces high density of states in the output file and is intended to be used when testing the integrator.
        config_data->n = 10;
    }
    else
    {
        if (config_data->algorithm == 1) // RK4
        {
            config_data->n = (int)(mult / config_data->dv_step + 0.5);
        }
        else if (config_data->algorithm == 2) // RK76
        {
            // Close to constant number of total steps saved across e_target values. 1.4e3 is a factor imitating the number of steps that would be saved with RK4.
            config_data->n = (int)(mult / 1.4e3 * pow(10,(3.6072 - 0.0746 * log10( config_data->e_target ))) + 0.5);
            // Interpolation order?
            config_data->interp_order = config.iorder;
        }
        else // Unknown algorithm
        {
            config_data->n = 10;
        }
    }
#ifdef __SaveRateOpt
    // Initialize step multiplier value with 1
    config_data->step_multiplier = 1.;
#endif
    //Set which particle to start and end with (particle number, from 1 to the number of particles in the input file)
    config_data->first_particle_number = config.fpnum;
    //Set name of the input/output file
    strcpy(temp, sizeof(temp), config.inputfn);
    if (strcmp(temp, "") == 0)
    {
        printf("\n\nerror: PARTICLE_INPUT_FILE_NAME not set");
        SLEEP(1000);
        return 1;
    }
    sprintf_s(config_data->inputfpath, 260, "%s%s%s", inputpath, temp, ".txt");
    // output name defaults to the input name unless PARTICLE_OUTPUT_FILE_NAME
    // was set to something other than "default"
    if (strcmp(config.outputfn, "default"))
    {
        strcpy(temp, sizeof(temp), config.outputfn);
    }
    if (mkdir(config_data->outputpath, 0777))
    {
        printf("...skip mkdir... "); // directory probably exists already
    }
    char outputfile[260] = "";
    sprintf_s(outputfile, 260, "%s%s", config_data->outputpath, temp);
    strcpy(config_data->outputpath, 260, outputfile);
    // Set Mie scattering coefficient
    sscanf(config.q_pr, "%lf", &config_data->q_pr);
    //Set mass of particles
    sscanf(config.pmass, "%lf", &config_data->particle_mass);
    if (config_data->particle_mass > 0.)
    {
        //Set density of particles
        sscanf(config.pdensity, "%lf", &config_data->particle_density);
    }
    // encounter-variables
    config_data->only_encounters = (bool)config.enc_only;
    if (config_data->only_encounters == 1)
    {
        sscanf(config.enc_rad, "%lf", &config_data->encounter_rad);
        config_data->encounter_body_int = config.enc_int;
    }
    // Free memory allocated for config char*s
    free(config.algo);
    free(config.finaltime);
    free(config.starttimes);
    free(config.bodysid);
    free(config.mult);
    free(config.inputfn);
    free(config.outputfn);
    free(config.pmass);
    free(config.q_pr);
    free(config.pdensity);
    free(config.enc_rad);
    free(config.dvstep);
    free(config.etarget);
    return 0;
}
/*
 * Convert the per-particle text output files produced by the integrator into
 * a single binary file ("OUTPUT" OS_SEP "binary_output.ctwu") of 7-float
 * rows, then delete the text files.
 *
 * result_array layout (one row = 7 floats):
 *   row 0 (file header): [first particle nr, last particle nr, particle mass,
 *     particle density, beta, start time, end time]
 *   per saved particle: one header row
 *     [0, 0, row of first state, row of last state, particle nr,
 *      multiplication factor, 0]
 *   followed by one row per state line parsed from the particle's .txt file.
 *
 * Returns 0 on success, 1 if the binary output file cannot be created,
 * 2 on any other error (OOM, unreadable or undeletable text file).
 *
 * NOTE(review): on the OOM/error paths only the row-pointer array is freed,
 * so the already-allocated rows leak; the rows are also not freed before the
 * final free(result_array). Tolerable for a run-once conversion step, but
 * worth fixing. The early "return 2" after a failed fopen leaks result_array
 * entirely.
 */
int convert_results_into_binary(configuration_values *config_data, int particles_count, double *multiplication_factor, char already_done_path[], char encounter_path[])
{
printf("\n Converting text output into binary... ");
//Create some variables
int j, e, c, h, state_count, i, result_array_length=1, particle_header_row, l, g, save_particle = 1;
FILE *output_file;
char *next_token = NULL, temp[260];
double tempdouble;
//Allocate beginning of result_array (row 0 = file header; grown per particle)
float **result_array;
result_array = malloc(1 * sizeof(float *));
if (result_array == NULL)
{
printf("\n\nerror: could not allocate result_array (OOM)");
return 2;
}
result_array[0] = malloc(7 * sizeof(float));
if (result_array[0] == NULL)
{
printf("\n\nerror: could not allocate result_array (OOM)");
free(result_array);
return 2;
}
//Set file header
result_array[0][0] = (float)config_data->first_particle_number;
result_array[0][1] = (float)(config_data->first_particle_number + particles_count - 1);
result_array[0][2] = (float)config_data->particle_mass;
result_array[0][3] = (float)config_data->particle_density;
result_array[0][4] = (float)config_data->beta;
result_array[0][5] = 0;
result_array[0][6] = 0;
//Read in all the particles and save them in result_array
for (j = 0; j < particles_count; j++)
{
//In "only encounters" mode, skip particles that are not listed in the
//encounter file; otherwise save_particle stays 1 from its initialiser.
if (config_data->only_encounters)
{
save_particle = particle_in_file(j, encounter_path);
} // else 1
if (save_particle)
{
state_count = 0;
char particle_path[260] = "";
sprintf_s(particle_path, 260, "%s_#%d%s", config_data->outputpath, (j + config_data->first_particle_number), ".txt");
//Retry the open up to three times (the file may still be held by a
//writer); give up with an error after the third failed attempt.
for (e = 0; e < 3; e++)
{
fopen_s(&output_file, particle_path, "r");
if (output_file == NULL)
{
SLEEP(100);
if (e == 2)
{
printf("\n\nerror: could not open .txt file particle #%d for conversion", j + config_data->first_particle_number);
return 2;
}
}
else
{
break;
}
}
//First pass: count newlines to size the block of state rows.
while ((c = fgetc(output_file)) != EOF)
{
if (c == '\n')
{
state_count++;
}
}
particle_header_row = result_array_length;
result_array_length += state_count + 1;
float **temp_result_array;
temp_result_array = realloc(result_array, (result_array_length)*sizeof(float *));
if (temp_result_array == NULL)
{
printf("\n\nerror: could not allocate result_array (OOM)");
free(result_array);
return 2;
}
else{
result_array = temp_result_array;
}
for (i = particle_header_row; i < result_array_length; i++)
{
result_array[i] = malloc(7 * sizeof(float));
if (result_array[i] == NULL)
{
printf("\n\nerror: could not allocate result_array (OOM)");
free(result_array);
return 2;
}
}
//Particle header row: columns [2]..[3] delimit this particle's state rows.
result_array[particle_header_row][0] = 0;
result_array[particle_header_row][1] = 0;
result_array[particle_header_row][2] = (float)(particle_header_row+1); //row number of this particle's first state
result_array[particle_header_row][3] = (float)(particle_header_row+state_count); //row number of this particle's last state
result_array[particle_header_row][4] = (float)(j + config_data->first_particle_number);
result_array[particle_header_row][5] = (float)(multiplication_factor[j]);
result_array[particle_header_row][6] = 0;
//Second pass: parse each tab-separated line into 7 floats
//(5 values, then one more tab-separated, then the rest of the line).
rewind(output_file);
l = particle_header_row;
while (fgets(temp, sizeof(temp), output_file) != NULL)
{
l++;
char* cval = strtok_r(temp, "\t", &next_token);
for (g = 0; g < 5; g++)
{
sscanf(cval, "%lf", &tempdouble);
result_array[l][g] = (float)tempdouble;
cval = strtok_r(NULL, "\t", &next_token);
}
sscanf(cval, "%lf", &tempdouble);
result_array[l][5] = (float)tempdouble;
cval = strtok_r(NULL, "\n", &next_token);
sscanf(cval, "%lf", &tempdouble);
result_array[l][6] = (float)tempdouble;
}
fclose(output_file);
//File-header start/end values are taken from column 6 of the first state
//row of the first and last particle (presumably timestamps -- TODO confirm
//column semantics against the writer of the .txt files).
if (j == 0)
{
result_array[0][5] = result_array[particle_header_row + 1][6];
}
if (j == particles_count - 1)
{
result_array[0][6] = result_array[particle_header_row + 1][6];
}
}
}
//Save result_array as binary file and delete text files
FILE *binout;
char binary_path[260] = "OUTPUT" OS_SEP "binary_output.ctwu";
fopen_s(&binout, binary_path, "wb");
if (binout == NULL)
{
printf("\n\nerror: could not create binary output file.\n");
free(result_array);
return 1;
}
for (h = 0; h < result_array_length; h++)
{
fwrite(result_array[h], sizeof(float), 7, binout);
}
fclose(binout);
free(result_array);
printf("...done.");
//NOTE(review): fcloseall() is non-standard (Windows/glibc extension).
fcloseall();
for (j = 0; j < particles_count; j++)
{
//NOTE(review): already_done_path is removed once per iteration; a single
//remove() hoisted above the loop would be equivalent.
remove(already_done_path);
char particle_path[260] = "";
sprintf_s(particle_path, 260, "%s_#%d%s", config_data->outputpath, (j + config_data->first_particle_number), ".txt");
if (remove(particle_path) != 0)
{
printf("\n\nerror: could not delete .txt file after conversion");
return 2;
}
}
return 0;
}
/*
 * Print version, build type, target platform and (in debug builds) the
 * active compile-time option flags to stdout.
 * PI_VERSION_MAJOR / PI_VERSION_MINOR are build-time string-literal macros.
 */
void printinfo()
{
// Print version
printf("ParticleIntegrator version " PI_VERSION_MAJOR "." PI_VERSION_MINOR "\n");
// Print build type (the build system defines exactly one RELTYPE* macro)
#ifdef RELTYPEDEB
printf(" Debug build\n");
#endif
#ifdef RELTYPERWDI
printf(" Release build with debug symbols\n");
#endif
#ifdef RELTYPEREL
printf(" Release build\n");
#endif
// Check windows
// BUG FIX: was "#ifdef _WIN32 || _WIN64" -- #ifdef takes exactly one
// identifier, so compilers warned and only _WIN32 was actually tested.
#if defined(_WIN32) || defined(_WIN64)
#ifdef _WIN64
printf(" Windows x86_64\n");
#else
printf(" Windows x86\n");
#endif
#endif
// Check GCC
#ifdef __GNUC__
#ifdef __x86_64__
printf(" Linux x86_64\n");
#else
#ifdef __ppc64__
printf(" Linux ppc64\n");
#else
printf(" Linux x86\n");
#endif
#endif
#endif
// Print active options in debug builds
#if defined(RELTYPERWDI) || defined(RELTYPEDEB)
printf(" " __DATE__ " " __TIME__ "\n Options: ");
#ifdef __WTIMING
printf("TIMING ");
#endif // __WTIMING
#ifdef __WSTEPINFO
printf("WSTEPINFO ");
#endif // __WSTEPINFO
#ifdef __PRD
printf("PRD ");
#endif // __PRD
#ifdef __SWD
printf("SWD ");
#endif // __SWD
#ifdef __Relativity
printf("Relativity ");
#endif // __Relativity
#ifdef __SaveRateOpt
printf("SaveRateOpt ");
#endif // __SaveRateOpt
#ifdef __CHKMEMLEAK
printf("CHKMEMLEAK ");
#endif // __CHKMEMLEAK
printf("\n");
#endif // RELTYPERWDI || RELTYPEDEB
}
n_body.c | /*
* N Body problem
*
* Available in High Performance Parallelism Pearls ch 9/10
*
* questions ask Brian Gravelle gravelle@lanl.gov
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#include <math.h>
#ifdef USE_CALI
#include <caliper/cali.h>
#endif
// NOTE(review): self-referential no-op macro -- presumably a leftover from a
// precision-switching macro (e.g. "#define real double"); confirm intent.
#define double double
#define TRUE 1
#define FALSE 0
// Run configuration parsed from the command line (see get_input()).
struct Inputs
{
size_t order; // size of array (number of particles)
size_t time_steps;
int threads;
};
// Integration constants used by n_body_problem().
struct Parameters
{
double softing; // softening term, added to the squared pair distance
double dt; // integration time step
};
// Structure-of-arrays particle storage: mass, position, velocity.
struct Particles
{
double *m;
double *x;
double *y;
double *z;
double *vx;
double *vy;
double *vz;
};
void get_input(int argc, char **argv, struct Inputs* input);
void n_body_problem(size_t order, size_t time_steps,
struct Parameters* parameters,
struct Particles* particles);
void create_particles(size_t order, struct Particles* particles);
void destroy_particles(size_t order, struct Particles* particles);
/*
 * Entry point: parse options, allocate the particle arrays, run the timed
 * N-body simulation, report the wall-clock time, and release the buffers.
 */
int main(int argc, char **argv) {
  struct Inputs input;
  struct Particles particles;
  struct Parameters parameters;
  double start, end;

  /* Fixed model parameters: softening term and integration step. */
  parameters.softing = 0.1;
  parameters.dt = 0.1;

  get_input(argc, argv, &input);

  omp_set_num_threads(input.threads);
  int threads = omp_get_max_threads();
  printf("\nAvailable threads = %d \n", threads);

#ifdef USE_CALI
  cali_init();
  cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#ifdef USE_CALI_REG
#pragma omp parallel
  {
    cali_set_int(thread_attr, omp_get_thread_num());
  }
#endif
#ifdef USE_CALI_UNCORE
  cali_set_int(thread_attr, omp_get_thread_num());
#endif
#endif

  // initialize the particle arrays
  printf("init...\n");
  create_particles(input.order, &particles);

  printf("Running N Body .....");fflush(stdout);
  start = omp_get_wtime();
  /* BUG FIX: the original passed "¶meters" -- an HTML-entity-mangled
     "&parameters" that does not compile. Also removed the unused locals
     correct/run_time/mflops/i/j/k/r. */
  n_body_problem(input.order, input.time_steps, &parameters, &particles);
  end = omp_get_wtime();
  printf("%f\n\n", end-start);

  destroy_particles(input.order, &particles);
  return 0;
}
/*
 * Advance the system by 'time_steps' steps of size parameters->dt using a
 * naive all-pairs O(order^2) force accumulation followed by an Euler
 * position update, both OpenMP-parallel across particles.
 *
 * NOTE(review): parameters->softing is added directly to the squared
 * distance (dr^2 + s), which also keeps the k == j self-interaction finite;
 * confirm this formulation (vs. the common dr^2 + s^2) is intended.
 *
 * FIX: removed the outer dx/dy/dz locals -- they were shadowed by the const
 * copies declared in the inner loop and never used.
 */
void n_body_problem(size_t order, size_t time_steps,
                    struct Parameters* parameters,
                    struct Particles* particles) {
  const double dt = parameters->dt;
  const double s  = parameters->softing;

  for (int i = 0; i < time_steps; i++) {
#ifdef USE_CALI_UNCORE
    CALI_MARK_BEGIN("N_Body");
#endif
    // compute the force on each particle and update its velocity
#pragma omp parallel
    {
#ifdef USE_CALI_REG
      CALI_MARK_BEGIN("N_Body");
#endif
#pragma omp for
      for (int j = 0; j < order; j++) {
        double Fx = 0.0;
        double Fy = 0.0;
        double Fz = 0.0;
        for (int k = 0; k < order; k++) {
          const double dx = particles->x[k] - particles->x[j];
          const double dy = particles->y[k] - particles->y[j];
          const double dz = particles->z[k] - particles->z[j];
          const double dr_sq = dx*dx + dy*dy + dz*dz + s;
          const double dr_recip = 1.0 / sqrt(dr_sq);
          const double dr_cube = dr_recip*dr_recip*dr_recip;
          const double m_r = particles->m[k] * dr_cube;
          Fx += m_r * dx;
          Fy += m_r * dy;
          Fz += m_r * dz;
        }
        particles->vx[j] += Fx * dt;
        particles->vy[j] += Fy * dt;
        particles->vz[j] += Fz * dt;
      }
      // update positions with the new velocities
#pragma omp for
      for (int j = 0; j < order; j++) {
        particles->x[j] += particles->vx[j] * dt;
        particles->y[j] += particles->vy[j] * dt;
        particles->z[j] += particles->vz[j] * dt;
      }
#ifdef USE_CALI_REG
      CALI_MARK_END("N_Body");
#endif
    } //parallel
#ifdef USE_CALI_UNCORE
    CALI_MARK_END("N_Body");
#endif
  } // time step
}
/*
 * Allocate the seven per-particle arrays (mass, position, velocity) for
 * 'order' particles. Ownership passes to the caller, who must release them
 * with destroy_particles().
 *
 * Idiom fix: cast-free malloc with "sizeof *ptr" instead of casted
 * sizeof(double) calls.
 * NOTE(review): allocation results are not checked; a failed malloc will
 * surface as a crash on first access.
 */
void create_particles(size_t order, struct Particles* particles) {
  particles->m  = malloc(order * sizeof *particles->m);
  particles->x  = malloc(order * sizeof *particles->x);
  particles->y  = malloc(order * sizeof *particles->y);
  particles->z  = malloc(order * sizeof *particles->z);
  particles->vx = malloc(order * sizeof *particles->vx);
  particles->vy = malloc(order * sizeof *particles->vy);
  particles->vz = malloc(order * sizeof *particles->vz);
}
/*
 * Release every array allocated by create_particles().
 * BUG FIX: the original never freed particles->m, leaking one array per
 * run. 'order' is unused but kept for interface compatibility.
 */
void destroy_particles(size_t order, struct Particles* particles) {
  (void)order;
  free(particles->m);
  free(particles->x);
  free(particles->y);
  free(particles->z);
  free(particles->vx);
  free(particles->vy);
  free(particles->vz);
}
/*
 * Parse command-line options into 'input', starting from the defaults
 * (order 65536, 4 time steps, 16 threads). Exits with status 0 on
 * -h/--help and status 1 when a flag is given without its value.
 *
 * BUG FIX: the original consumed values with "if (i++ < argc)", which is
 * always true inside the loop, so a flag given as the last argument read
 * argv[argc] (NULL) and passed it to atoi() -- undefined behavior. We now
 * check i + 1 < argc before consuming the value.
 */
void get_input(int argc, char **argv, struct Inputs* input) {
  int i;

  input->order = 65536; // 2^16
  input->time_steps = 4;
  input->threads = 16;

  for(i = 1; i < argc; i++) {
    if ( !(strcmp("-h", argv[i])) || !(strcmp("--help", argv[i])) ) {
      printf("\n");
      printf("n body problem help usage:\n");
      printf(" -h --help ...................... print this message\n");
      printf(" -n --num_particles [] .......... set the number of particles (65536)\n");
      printf(" -s --time_steps [] ............. set the number of time steps (4)\n");
      printf(" -t --num_threads [] ............ set the number of threads (16)\n");
      printf("\n");
      exit(0);
    }
    if ( !(strcmp("-n", argv[i])) || !(strcmp("--order", argv[i])) ) {
      if (i + 1 < argc){
        input->order = atoi(argv[++i]);
      } else {
        printf("Please include a flop count with that option o/order\n");
        exit(1);
      }
    }
    if ( !(strcmp("-s", argv[i])) || !(strcmp("--time_steps", argv[i])) ) {
      if (i + 1 < argc){
        input->time_steps = atoi(argv[++i]);
      } else {
        printf("Please include a count of time steps count with option t/time_steps\n");
        exit(1);
      }
    }
    if ( !(strcmp("-t", argv[i])) || !(strcmp("--num_threads", argv[i])) ) {
      if (i + 1 < argc){
        input->threads = atoi(argv[++i]);
      } else {
        printf("Please include a number of threads with option n/num_threads\n");
        exit(1);
      }
    }
  }
}
|
a.15.1.c | /* { dg-do run } */
#include <omp.h>
#include <stdio.h>
/*
 * Print the calling thread's id, the team size, the nesting flag and the
 * caller-supplied value n -- diagnostic output for this OpenMP testcase.
 * FIX: the file called omp_get_thread_num()/omp_get_num_threads()/
 * omp_get_nested() without including <omp.h>, relying on implicit function
 * declarations (an error in modern C); <omp.h> is now included at the top
 * of the file.
 */
void
work (int n)
{
  printf ("[%d of %d], nested = %d, n = %d\n", omp_get_thread_num (), omp_get_num_threads(), omp_get_nested (), n);
}
/* Call work() twice with a barrier in between; the orphaned barrier binds
   to the innermost parallel region active in the caller (a no-op when
   called from serial code). */
void
sub3 (int n)
{
work (n);
#pragma omp barrier
work (n);
}
/* Open a parallel region; every thread of the new team runs sub3(k). */
void
sub2 (int k)
{
#pragma omp parallel shared(k)
sub3 (k);
}
/* Parallel region whose worksharing loop hands each iteration i to sub2(),
   which in turn opens a second (nested) parallel region. */
void
sub1 (int n)
{
int i;
#pragma omp parallel private(i) shared(n)
{
#pragma omp for
for (i = 0; i < n; i++)
sub2 (i);
}
}
/* Exercise the three entry points: nested parallelism via sub1, a single
   parallel region via sub2, and a plain serial call of sub3. */
int
main ()
{
sub1 (2);
sub2 (15);
sub3 (20);
return 0;
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/*
 * Compute *result = *x - *y for two `struct timeval` values.
 *
 * The subtrahend *y is normalised in place (i.e. it may be modified) so
 * that the microsecond difference is non-negative before subtracting.
 *
 * Returns 1 when the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y's microseconds until
	   x->tv_usec >= y->tv_usec. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec += borrow;
	}
	/* Carry any excess beyond one second from y's microseconds into
	   its seconds field. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* tv_usec of the result is now certainly non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocate a double-buffered (2 x Nz x Ny x Nx) grid, fill it with
 * pseudo-random values, and run TESTS timed repetitions of the tiled,
 * time-skewed order-1 3D 7-point stencil sweep (PLUTO/CLooG-generated loop
 * nest), reporting per-test and minimum wall-clock times.
 *
 * Expected usage: prog Nx Ny Nz Nt
 *
 * NOTE(review): Nx/Ny/Nz stay uninitialized when argc <= 3, and Nt when
 * argc <= 4 -- reading them afterwards is undefined behavior; the program
 * silently assumes all four arguments are always given.
 * NOTE(review): tile_size and the grid A are never freed (the cleanup code
 * is commented out below, reportedly because it degraded performance).
 */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// Double-buffered grid: A[t%2] is read, A[(t+1)%2] is written each step.
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 24;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// stencil coefficients
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* (A glibc copyright/feature-test header comment was inlined here by the
   code generator; trimmed for brevity -- it documented no code in this
   file.) */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Time-skewed, tiled traversal of the (t, z, y, x) iteration space:
   t1..t4 enumerate tiles, t5..t8 the points within a tile. The floord/
   ceild bounds come directly from the polyhedral code generator and
   should not be hand-edited. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-508,512)),ceild(24*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(8*t1+Nx+13,512)),floord(16*t2+Nx+12,512)),floord(24*t3+Nx+20,512)),floord(16*t1-16*t2+Nz+Nx+11,512));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),512*t4+510),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(512*t4,t5+1);
ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
/* 7-point stencil: alpha * center + beta * (sum of 6 face neighbours),
   written into the other time buffer. */
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
2Dfold.c | /*
* minimum free energy
* RNA secondary structure with
* basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/params/basic.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "ViennaRNA/2Dfold.h"
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
int compute_2Dfold_F3 = 0;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void mfe_linear(vrna_fold_compound_t *vc);
PRIVATE void mfe_circ(vrna_fold_compound_t *vc);
PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars);
PRIVATE void backtrack_f5(unsigned int j,
int k,
int l,
char *structure,
vrna_fold_compound_t *vc);
PRIVATE void backtrack_c(unsigned int i,
unsigned int j,
int k,
int l,
char *structure,
vrna_fold_compound_t *vc);
PRIVATE void backtrack_m(unsigned int i,
unsigned int j,
int k,
int l,
char *structure,
vrna_fold_compound_t *vc);
PRIVATE void backtrack_m1(unsigned int i,
unsigned int j,
int k,
int l,
char *structure,
vrna_fold_compound_t *vc);
PRIVATE void backtrack_fc(int k,
int l,
char *structure,
vrna_fold_compound_t *vc);
PRIVATE void backtrack_m2(unsigned int i,
int k,
int l,
char *structure,
vrna_fold_compound_t *vc);
PRIVATE void adjustArrayBoundaries(int ***array,
int *k_min,
int *k_max,
int **l_min,
int **l_max,
int k_min_real,
int k_max_real,
int *l_min_real,
int *l_max_real);
INLINE PRIVATE void preparePosteriorBoundaries(int size,
int shift,
int *min_k,
int *max_k,
int **min_l,
int **max_l);
INLINE PRIVATE void updatePosteriorBoundaries(int d1,
int d2,
int *min_k,
int *max_k,
int **min_l,
int **max_l);
INLINE PRIVATE void prepareBoundaries(int min_k_pre,
int max_k_pre,
int min_l_pre,
int max_l_pre,
int bpdist,
int *min_k,
int *max_k,
int **min_l,
int **max_l);
INLINE PRIVATE void prepareArray(int ***array,
int min_k,
int max_k,
int *min_l,
int *max_l);
INLINE PRIVATE void prepareArray2(unsigned long ***array,
int min_k,
int max_k,
int *min_l,
int *max_l);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
#if 0
PRIVATE void
initialize_TwoDfold_vars(TwoDfold_vars *vars)
{
update_TwoDfold_params(vars);
/* this call updates the params in the ViennaRNA fold.o which is a global, so be careful
* whith calling it parallel... need a workarround or fix of ViennaRNA fold stuff
*/
update_fold_params();
}
PUBLIC TwoDfold_solution **
TwoDfold(TwoDfold_vars *vars,
int distance1,
int distance2)
{
unsigned int i, d1, d2;
unsigned int maxD1;
unsigned int maxD2;
unsigned int length;
TwoDfold_solution **output;
initialize_TwoDfold_vars(vars);
if (fabs(vars->P->temperature - temperature) > 1e-6)
update_TwoDfold_params(vars);
vars->S = encode_sequence(vars->sequence, 0);
vars->S1 = encode_sequence(vars->sequence, 1);
make_ptypes(vars);
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
if (distance1 >= 0) {
if ((unsigned int)distance1 > maxD1)
fprintf(stderr,
"limiting maximum basepair distance 1 to %u\n",
maxD1);
else
maxD1 = (unsigned int)distance1;
}
if (distance2 >= 0) {
if ((unsigned int)distance2 > maxD2)
fprintf(stderr,
"limiting maximum basepair distance 2 to %u\n",
maxD2);
else
maxD2 = (unsigned int)distance2;
}
vars->maxD1 = maxD1;
vars->maxD2 = maxD2;
output = (TwoDfold_solution **)vrna_alloc((vars->maxD1 + 1) * sizeof(TwoDfold_solution *));
mfe_linear(vars);
if (vars->circ)
mfe_circ(vars);
length = vars->seq_length;
for (d1 = 0; d1 <= maxD1; d1++) {
output[d1] = (TwoDfold_solution *)vrna_alloc((vars->maxD2 + 1) * sizeof(TwoDfold_solution));
#ifdef _OPENMP
#pragma omp parallel for private(d2)
#endif
for (d2 = 0; d2 <= maxD2; d2++) {
output[d1][d2].en = (float)INF / (float)100.;
output[d1][d2].s = NULL;
}
if ((d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length]))
&& (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))) {
#ifdef _OPENMP
#pragma omp parallel for private(d2, i)
#endif
for (d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]);
d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]);
d2 += 2) {
output[d1][d2].en = (float)((vars->circ) ? vars->E_Fc[d1][d2 / 2] : vars->E_F5[length][d1][d2 / 2]) / (float)100.;
if (vars->do_backtrack && (output[d1][d2].en != (float)INF / (float)100.)) {
char *mfe_structure = (char *)vrna_alloc(length + 1);
for (i = 0; i < length; i++)
mfe_structure[i] = '.';
mfe_structure[i] = '\0';
(vars->circ) ? backtrack_fc(d1, d2, mfe_structure, vars) : backtrack_f5(length, d1, d2, mfe_structure, vars);
output[d1][d2].s = mfe_structure;
}
}
}
}
return output;
}
#endif
/*
 * Compute minimum-free-energy representatives for all distance classes
 * (d1, d2), i.e. for each pair of base-pair distances to the two reference
 * structures stored in 'vars'. 'distance1'/'distance2' optionally cap the
 * maximum distances (a negative value means "no restriction"; a request
 * larger than the precomputed maximum only emits a warning).
 *
 * Returns a heap-allocated list of vrna_sol_TwoD_t, one entry per non-empty
 * distance class, optionally followed by one entry with k == l == -1 for
 * the collective "remaining" partition beyond the distance limits, and
 * terminated by an entry with k == INF. The caller owns the array and each
 * entry's structure string 's'.
 */
PUBLIC vrna_sol_TwoD_t *
vrna_mfe_TwoD(vrna_fold_compound_t *vars,
int distance1,
int distance2)
{
unsigned int i, d1, d2;
unsigned int maxD1;
unsigned int maxD2;
unsigned int length;
unsigned int counter = 0;
int en = 0;
vrna_sol_TwoD_t *output;
vrna_md_t *md;
vrna_mx_mfe_t *matrices;
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
matrices = vars->matrices;
md = &(vars->params->model_details);
/* clamp the requested distance limits to the precomputed maxima */
if (distance1 >= 0) {
if ((unsigned int)distance1 > maxD1)
vrna_message_warning("vrna_mfe_TwoD@2Dfold.c: limiting maximum basepair distance 1 to %u\n",
maxD1);
else
maxD1 = (unsigned int)distance1;
}
if (distance2 >= 0) {
if ((unsigned int)distance2 > maxD2)
vrna_message_warning("vrna_mfe_TwoD@2Dfold.c: limiting maximum basepair distance 2 to %u\n",
maxD2);
else
maxD2 = (unsigned int)distance2;
}
vars->maxD1 = maxD1;
vars->maxD2 = maxD2;
/* worst-case class count plus remainder and end-marker entries; trimmed to
   the actual size by the realloc at the end */
output = (vrna_sol_TwoD_t *)vrna_alloc((((vars->maxD1 + 1) * (vars->maxD2 + 2)) / 2 + 2) * sizeof(vrna_sol_TwoD_t));
/* fill the DP matrices (plus the circular-fold matrices when the model
   says the RNA is circular) */
mfe_linear(vars);
if (md->circ)
mfe_circ(vars);
length = vars->length;
for (d1 = 0; d1 <= maxD1; d1++) {
if ((d1 >= ((md->circ) ? matrices->k_min_Fc : matrices->k_min_F5[length]))
&& (d1 <= ((md->circ) ? matrices->k_max_Fc : matrices->k_max_F5[length]))) {
/* d2 advances in steps of 2 -- only every other class is populated
   (presumably fixed by the parity of the references' distance; the
   l_min/l_max tables encode the valid range) */
for (d2 = ((md->circ) ? matrices->l_min_Fc[d1] : matrices->l_min_F5[length][d1]);
d2 <= ((md->circ) ? matrices->l_max_Fc[d1] : matrices->l_max_F5[length][d1]);
d2 += 2) {
en = ((md->circ) ? matrices->E_Fc[d1][d2 / 2] : matrices->E_F5[length][d1][d2 / 2]);
if (en == INF)
continue;
output[counter].k = d1;
output[counter].l = d2;
output[counter].en = (float)en / (float)100.;
if (md->backtrack) {
/* backtrack one representative structure for this class */
char *mfe_structure = (char *)vrna_alloc(length + 1);
for (i = 0; i < length; i++)
mfe_structure[i] = '.';
mfe_structure[i] = '\0';
(md->circ) ? backtrack_fc((int)d1, (int)d2, mfe_structure, vars) : backtrack_f5(length, (int)d1, (int)d2, mfe_structure, vars);
output[counter].s = mfe_structure;
} else {
output[counter].s = NULL;
}
counter++;
}
}
}
/* store entry for remaining partition if it exists */
en = ((md->circ) ? matrices->E_Fc_rem : matrices->E_F5_rem[length]);
if (en != INF) {
output[counter].k = -1;
output[counter].l = -1;
output[counter].en = (float)en / (float)100.;
if (md->backtrack) {
char *mfe_structure = (char *)vrna_alloc(length + 1);
for (i = 0; i < length; i++)
mfe_structure[i] = '.';
mfe_structure[i] = '\0';
(md->circ) ? backtrack_fc(-1, -1, mfe_structure, vars) : backtrack_f5(length, -1, -1, mfe_structure, vars);
output[counter].s = mfe_structure;
} else {
output[counter].s = NULL;
}
counter++;
}
/* insert end-marker entry */
output[counter].k = output[counter].l = INF;
counter++;
/* resize to actual dataset amount */
output = (vrna_sol_TwoD_t *)vrna_realloc(output, sizeof(vrna_sol_TwoD_t) * counter);
return output;
}
/*
 * Backtrack one structure of distance class (k, l) for the prefix [1..j]
 * of the sequence in 'vc' (pass k = l = -1 to backtrack from the
 * "remaining" partition). Returns a caller-owned dot-bracket string, or
 * NULL when the prefix is shorter than a minimal hairpin
 * (min_loop_size + 2).
 *
 * BUG FIX: the original allocated the structure string before the length
 * check and leaked it on the early NULL return; the check now comes first.
 */
PUBLIC char *
vrna_backtrack5_TwoD(vrna_fold_compound_t *vc,
int k,
int l,
unsigned int j)
{
unsigned int i;
char *mfe_structure;
if (j < vc->params->model_details.min_loop_size + 2)
return NULL;
mfe_structure = (char *)vrna_alloc(j + 1);
for (i = 0; i < j; i++)
mfe_structure[i] = '.';
mfe_structure[i] = '\0';
backtrack_f5(j, k, l, mfe_structure, vc);
return mfe_structure;
}
PRIVATE void
mfe_linear(vrna_fold_compound_t *vc)
{
unsigned int d, i, j, ij, maxD1, maxD2, seq_length, dia, dib, dja, djb, *referenceBPs1, *referenceBPs2, *mm1, *mm2, *bpdist;
int cnt1, cnt2, cnt3, cnt4, d1, d2, energy, dangles, temp2, type, additional_en, *my_iindx, *jindx, circ, *rtype, turn;
short *S1, *reference_pt1, *reference_pt2;
char *sequence, *ptype;
vrna_param_t *P;
vrna_mx_mfe_t *matrices;
vrna_md_t *md;
/* dereferenciate things we often need */
P = vc->params;
md = &(P->model_details);
matrices = vc->matrices;
sequence = vc->sequence;
seq_length = vc->length;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
reference_pt1 = vc->reference_pt1;
reference_pt2 = vc->reference_pt2;
my_iindx = vc->iindx;
jindx = vc->jindx;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
mm1 = vc->mm1;
mm2 = vc->mm2;
bpdist = vc->bpdist;
dangles = md->dangles;
circ = md->circ;
turn = md->min_loop_size;
for (d = turn + 2; d <= seq_length; d++) {
/* i,j in [1..length] */
#ifdef _OPENMP
#pragma omp parallel for private(additional_en, j, energy, temp2, i, ij, dia,dib,dja,djb,cnt1,cnt2,cnt3,cnt4, d1, d2)
#endif
for (j = d; j <= seq_length; j++) {
unsigned int p, q, pq, u, maxp, dij;
int type_2, type, tt, no_close, base_d1, base_d2;
i = j - d + 1;
dij = j - i - 1;
ij = my_iindx[i] - j;
type = ptype[jindx[j] + i];
no_close = (((type == 3) || (type == 4)) && no_closingGU);
if (type) {
/* we have a pair */
/* increase or decrease distance-to-reference value depending whether (i,j) is included in
* reference or has to be introduced
*/
base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;
/* HAIRPIN STRUCTURES */
/* get distance to reference if closing the hairpin
* d = dbp(T_{i,j}, {i,j})
*/
d1 = base_d1 + referenceBPs1[ij];
d2 = base_d2 + referenceBPs2[ij];
int min_k, max_k, min_l, max_l;
int real_min_k, real_max_k, *min_l_real, *max_l_real;
min_l = min_k = 0;
max_k = mm1[ij] + referenceBPs1[ij];
max_l = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[ij],
&matrices->k_min_C[ij],
&matrices->k_max_C[ij],
&matrices->l_min_C[ij],
&matrices->l_max_C[ij]
);
preparePosteriorBoundaries(matrices->k_max_C[ij] - matrices->k_min_C[ij] + 1,
matrices->k_min_C[ij],
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
prepareArray(&matrices->E_C[ij],
matrices->k_min_C[ij],
matrices->k_max_C[ij],
matrices->l_min_C[ij],
matrices->l_max_C[ij]
);
#ifdef COUNT_STATES
prepareArray2(&matrices->N_C[ij],
matrices->k_min_C[ij],
matrices->k_max_C[ij],
matrices->l_min_C[ij],
matrices->l_max_C[ij]
);
#endif
/* d1 and d2 are the distancies to both references introduced by closing a hairpin structure at (i,j) */
if ((d1 >= 0) && (d2 >= 0)) {
if (((unsigned int)d1 <= maxD1) && ((unsigned int)d2 <= maxD2)) {
matrices->E_C[ij][d1][d2 / 2] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, P);
updatePosteriorBoundaries(d1,
d2,
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_C[ij][d1][d2 / 2] = 1;
#endif
} else {
matrices->E_C_rem[ij] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i + 1], S1[j - 1], sequence + i - 1, P);
}
}
/* INTERIOR LOOP STRUCTURES */
maxp = MIN2(j - 2 - turn, i + MAXLOOP + 1);
for (p = i + 1; p <= maxp; p++) {
unsigned int minq = p + turn + 1;
unsigned int ln_pre = dij + p;
if (ln_pre > minq + MAXLOOP)
minq = ln_pre - MAXLOOP - 1;
for (q = minq; q < j; q++) {
pq = my_iindx[p] - q;
/* set distance to reference structure... */
type_2 = ptype[jindx[q] + p];
if (type_2 == 0)
continue;
type_2 = rtype[type_2];
/* get distance to reference if closing the interior loop
* d2 = dbp(S_{i,j}, S_{p.q} + {i,j})
*/
d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[pq];
d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[pq];
if (no_closingGU)
if (no_close || (type_2 == 3) || (type_2 == 4))
if ((p > i + 1) || (q < j - 1))
continue;
/* continue unless stack */
energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P);
if (matrices->E_C[pq] != NULL) {
for (cnt1 = matrices->k_min_C[pq]; cnt1 <= matrices->k_max_C[pq]; cnt1++) {
for (cnt2 = matrices->l_min_C[pq][cnt1]; cnt2 <= matrices->l_max_C[pq][cnt1]; cnt2 += 2) {
if (matrices->E_C[pq][cnt1][cnt2 / 2] != INF) {
if (((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)) {
matrices->E_C[ij][cnt1 + d1][(cnt2 + d2) / 2] = MIN2(matrices->E_C[ij][cnt1 + d1][(cnt2 + d2) / 2],
matrices->E_C[pq][cnt1][cnt2 / 2] + energy
);
updatePosteriorBoundaries(cnt1 + d1,
cnt2 + d2,
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_C[ij][cnt1 + d1][(cnt2 + d2) / 2] += matrices->N_C[pq][cnt1][cnt2 / 2];
#endif
}
/* collect all cases where d1+cnt1 or d2+cnt2 exceeds maxD1, maxD2, respectively */
else {
matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_C[pq][cnt1][cnt2 / 2] + energy);
}
}
}
}
}
/* collect all contributions where C[pq] already lies outside k_max, l_max boundary */
if (matrices->E_C_rem[pq] != INF)
matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij], matrices->E_C_rem[pq] + energy);
} /* end q-loop */
} /* end p-loop */
/* MULTI LOOP STRUCTURES */
if (!no_close) {
/* dangle energies for multiloop closing stem */
tt = rtype[type];
temp2 = P->MLclosing;
if (dangles == 2)
temp2 += E_MLstem(tt, S1[j - 1], S1[i + 1], P);
else
temp2 += E_MLstem(tt, -1, -1, P);
for (u = i + turn + 2; u < j - turn - 2; u++) {
int i1u = my_iindx[i + 1] - u;
int u1j1 = my_iindx[u + 1] - j + 1;
/* check all cases where either M or M1 are already out of scope of maxD1 and/or maxD2 */
if (matrices->E_M_rem[i1u] != INF) {
for (cnt3 = matrices->k_min_M1[u1j1];
cnt3 <= matrices->k_max_M1[u1j1];
cnt3++)
for (cnt4 = matrices->l_min_M1[u1j1][cnt3];
cnt4 <= matrices->l_max_M1[u1j1][cnt3];
cnt4 += 2) {
if (matrices->E_M1[u1j1][cnt3][cnt4 / 2] != INF) {
matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij],
matrices->E_M_rem[i1u]
+ matrices->E_M1[u1j1][cnt3][cnt4 / 2]
+ temp2
);
}
}
if (matrices->E_M1_rem[u1j1] != INF) {
matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij],
matrices->E_M_rem[i1u]
+ matrices->E_M1_rem[u1j1]
+ temp2
);
}
}
if (matrices->E_M1_rem[u1j1] != INF) {
for (cnt1 = matrices->k_min_M[i1u];
cnt1 <= matrices->k_max_M[i1u];
cnt1++)
for (cnt2 = matrices->l_min_M[i1u][cnt1];
cnt2 <= matrices->l_max_M[i1u][cnt1];
cnt2 += 2)
if (matrices->E_M[i1u][cnt1][cnt2 / 2] != INF) {
matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij],
matrices->E_M[i1u][cnt1][cnt2 / 2]
+ matrices->E_M1_rem[u1j1]
+ temp2
);
}
}
/* get distance to reference if closing the multiloop
* d = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1})
*/
if (!matrices->E_M[i1u])
continue;
if (!matrices->E_M1[u1j1])
continue;
d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[i1u] - referenceBPs1[u1j1];
d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[i1u] - referenceBPs2[u1j1];
for (cnt1 = matrices->k_min_M[i1u];
cnt1 <= matrices->k_max_M[i1u];
cnt1++)
for (cnt2 = matrices->l_min_M[i1u][cnt1];
cnt2 <= matrices->l_max_M[i1u][cnt1];
cnt2 += 2)
for (cnt3 = matrices->k_min_M1[u1j1];
cnt3 <= matrices->k_max_M1[u1j1];
cnt3++)
for (cnt4 = matrices->l_min_M1[u1j1][cnt3];
cnt4 <= matrices->l_max_M1[u1j1][cnt3];
cnt4 += 2) {
if ((matrices->E_M[i1u][cnt1][cnt2 / 2] != INF) && (matrices->E_M1[u1j1][cnt3][cnt4 / 2] != INF)) {
if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) {
matrices->E_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(matrices->E_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2],
matrices->E_M[i1u][cnt1][cnt2 / 2]
+ matrices->E_M1[u1j1][cnt3][cnt4 / 2]
+ temp2
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1,
cnt2 + cnt4 + d2,
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_C[ij][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] += matrices->N_M[i1u][cnt1][cnt2 / 2] * matrices->N_M1[u1j1][cnt3][cnt4 / 2];
#endif
}
/* collect all cases where d1+cnt1+cnt3 or d2+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
else {
matrices->E_C_rem[ij] = MIN2(matrices->E_C_rem[ij],
matrices->E_M[i1u][cnt1][cnt2 / 2]
+ matrices->E_M1[u1j1][cnt3][cnt4 / 2]
+ temp2
);
}
}
}
}
}
/* resize and move memory portions of energy matrix E_C */
adjustArrayBoundaries(&matrices->E_C[ij],
&matrices->k_min_C[ij],
&matrices->k_max_C[ij],
&matrices->l_min_C[ij],
&matrices->l_max_C[ij],
real_min_k,
real_max_k,
min_l_real,
max_l_real
);
#ifdef COUNT_STATES
/* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/
#endif
} /* end >> if (pair) << */
/* done with c[i,j], now compute fML[i,j] */
/* free ends ? -----------------------------------------*/
dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i + 1] - j];
dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i + 1] - j];
dja = referenceBPs1[ij] - referenceBPs1[ij + 1];
djb = referenceBPs2[ij] - referenceBPs2[ij + 1];
if (dangles == 2)
temp2 = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
else
temp2 = E_MLstem(type, -1, -1, P);
int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
int min_k_real_m, max_k_real_m, *min_l_real_m, *max_l_real_m;
int min_k_real_m1, max_k_real_m1, *min_l_real_m1, *max_l_real_m1;
min_k_guess = min_l_guess = 0;
max_k_guess = mm1[ij] + referenceBPs1[ij];
max_l_guess = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[ij],
&matrices->k_min_M[ij],
&matrices->k_max_M[ij],
&matrices->l_min_M[ij],
&matrices->l_max_M[ij]
);
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[ij],
&matrices->k_min_M1[ij],
&matrices->k_max_M1[ij],
&matrices->l_min_M1[ij],
&matrices->l_max_M1[ij]
);
preparePosteriorBoundaries(matrices->k_max_M[ij] - matrices->k_min_M[ij] + 1,
matrices->k_min_M[ij],
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
preparePosteriorBoundaries(matrices->k_max_M1[ij] - matrices->k_min_M1[ij] + 1,
matrices->k_min_M1[ij],
&min_k_real_m1,
&max_k_real_m1,
&min_l_real_m1,
&max_l_real_m1
);
prepareArray(&matrices->E_M[ij],
matrices->k_min_M[ij],
matrices->k_max_M[ij],
matrices->l_min_M[ij],
matrices->l_max_M[ij]
);
prepareArray(&matrices->E_M1[ij],
matrices->k_min_M1[ij],
matrices->k_max_M1[ij],
matrices->l_min_M1[ij],
matrices->l_max_M1[ij]
);
#ifdef COUNT_STATES
prepareArray2(&matrices->N_M[ij],
matrices->k_min_M[ij],
matrices->k_max_M[ij],
matrices->l_min_M[ij],
matrices->l_max_M[ij]
);
prepareArray2(&matrices->N_M1[ij],
matrices->k_min_M1[ij],
matrices->k_max_M1[ij],
matrices->l_min_M1[ij],
matrices->l_max_M1[ij]
);
#endif
/* now to the actual computations... */
/* 1st E_M[ij] = E_M1[ij] = E_C[ij] + b */
if (matrices->E_C_rem[ij] != INF)
matrices->E_M_rem[ij] = matrices->E_M1_rem[ij] = temp2 + matrices->E_C_rem[ij];
if (matrices->E_C[ij]) {
for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++) {
for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2) {
if (matrices->E_C[ij][cnt1][cnt2 / 2] != INF) {
matrices->E_M[ij][cnt1][cnt2 / 2] = matrices->E_M1[ij][cnt1][cnt2 / 2] = temp2 + matrices->E_C[ij][cnt1][cnt2 / 2];
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real_m1,
&max_k_real_m1,
&min_l_real_m1,
&max_l_real_m1
);
#ifdef COUNT_STATES
matrices->N_M[ij][cnt1][cnt2 / 2] = matrices->N_M1[ij][cnt1][cnt2 / 2] = matrices->N_C[ij][cnt1][cnt2 / 2];
#endif
}
}
}
}
/* 2nd E_M[ij] = MIN(E_M[ij], E_M[i+1,j] + c) */
if (matrices->E_M_rem[my_iindx[i + 1] - j] != INF) {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M_rem[my_iindx[i + 1] - j] + P->MLbase
);
}
if (matrices->E_M[my_iindx[i + 1] - j]) {
for (cnt1 = matrices->k_min_M[my_iindx[i + 1] - j];
cnt1 <= matrices->k_max_M[my_iindx[i + 1] - j];
cnt1++) {
for (cnt2 = matrices->l_min_M[my_iindx[i + 1] - j][cnt1];
cnt2 <= matrices->l_max_M[my_iindx[i + 1] - j][cnt1];
cnt2 += 2) {
if (matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] != INF) {
if (((cnt1 + dia) <= maxD1) && ((cnt2 + dib) <= maxD2)) {
matrices->E_M[ij][cnt1 + dia][(cnt2 + dib) / 2] = MIN2(matrices->E_M[ij][cnt1 + dia][(cnt2 + dib) / 2],
matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase
);
updatePosteriorBoundaries(cnt1 + dia,
cnt2 + dib,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
#ifdef COUNT_STATES
matrices->N_M[ij][cnt1 + dia][(cnt2 + dib) / 2] += matrices->N_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2];
#endif
}
/* collect all cases where dia+cnt1 or dib+cnt2 exceeds maxD1, maxD2, respectively */
else {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase
);
}
}
}
}
}
/* 3rd E_M[ij] = MIN(E_M[ij], E_M[i,j-1] + c) */
if (matrices->E_M_rem[ij + 1] != INF) {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M_rem[ij + 1] + P->MLbase
);
}
if (matrices->E_M[ij + 1]) {
for (cnt1 = matrices->k_min_M[ij + 1];
cnt1 <= matrices->k_max_M[ij + 1];
cnt1++) {
for (cnt2 = matrices->l_min_M[ij + 1][cnt1];
cnt2 <= matrices->l_max_M[ij + 1][cnt1];
cnt2 += 2) {
if (matrices->E_M[ij + 1][cnt1][cnt2 / 2] != INF) {
if (((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)) {
matrices->E_M[ij][cnt1 + dja][(cnt2 + djb) / 2] = MIN2(matrices->E_M[ij][cnt1 + dja][(cnt2 + djb) / 2],
matrices->E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase
);
updatePosteriorBoundaries(cnt1 + dja,
cnt2 + djb,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
#ifdef COUNT_STATES
matrices->N_M[ij][cnt1 + dja][(cnt2 + djb) / 2] += matrices->N_M[ij + 1][cnt1][cnt2 / 2];
#endif
}
/* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */
else {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase
);
}
}
}
}
}
/* 4th E_M1[ij] = MIN(E_M1[ij], E_M1[i,j-1] + c) */
if (matrices->E_M1_rem[ij + 1] != INF) {
matrices->E_M1_rem[ij] = MIN2(matrices->E_M1_rem[ij],
matrices->E_M1_rem[ij + 1] + P->MLbase
);
}
if (matrices->E_M1[ij + 1]) {
for (cnt1 = matrices->k_min_M1[ij + 1];
cnt1 <= matrices->k_max_M1[ij + 1];
cnt1++) {
for (cnt2 = matrices->l_min_M1[ij + 1][cnt1];
cnt2 <= matrices->l_max_M1[ij + 1][cnt1];
cnt2 += 2) {
if (matrices->E_M1[ij + 1][cnt1][cnt2 / 2] != INF) {
if (((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)) {
matrices->E_M1[ij][cnt1 + dja][(cnt2 + djb) / 2] = MIN2(matrices->E_M1[ij][cnt1 + dja][(cnt2 + djb) / 2],
matrices->E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase
);
updatePosteriorBoundaries(cnt1 + dja,
cnt2 + djb,
&min_k_real_m1,
&max_k_real_m1,
&min_l_real_m1,
&max_l_real_m1
);
#ifdef COUNT_STATES
matrices->N_M1[ij][cnt1 + dja][(cnt2 + djb) / 2] += matrices->N_M1[ij + 1][cnt1][cnt2 / 2];
#endif
}
/* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */
else {
matrices->E_M1_rem[ij] = MIN2(matrices->E_M1_rem[ij],
matrices->E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase
);
}
}
}
}
}
/* 5th E_M[ij] = MIN(E_M[ij], min(E_M[i,k] + E_M[k+1,j])) */
if (j > turn + 2) {
for (u = i + 1 + turn; u <= j - 2 - turn; u++) {
/* check all cases where M(i,u) and/or M(u+1,j) are already out of scope of maxD1 and/or maxD2 */
if (matrices->E_M_rem[my_iindx[i] - u] != INF) {
for (cnt3 = matrices->k_min_M[my_iindx[u + 1] - j];
cnt3 <= matrices->k_max_M[my_iindx[u + 1] - j];
cnt3++) {
for (cnt4 = matrices->l_min_M[my_iindx[u + 1] - j][cnt3];
cnt4 <= matrices->l_max_M[my_iindx[u + 1] - j][cnt3];
cnt4 += 2) {
if (matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] != INF) {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M_rem[my_iindx[i] - u] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2]
);
}
}
}
if (matrices->E_M_rem[my_iindx[u + 1] - j] != INF) {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M_rem[my_iindx[i] - u] + matrices->E_M_rem[my_iindx[u + 1] - j]
);
}
}
if (matrices->E_M_rem[my_iindx[u + 1] - j] != INF) {
for (cnt1 = matrices->k_min_M[my_iindx[i] - u];
cnt1 <= matrices->k_max_M[my_iindx[i] - u];
cnt1++) {
for (cnt2 = matrices->l_min_M[my_iindx[i] - u][cnt1];
cnt2 <= matrices->l_max_M[my_iindx[i] - u][cnt1];
cnt2 += 2) {
if (matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] != INF) {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M_rem[my_iindx[u + 1] - j]
);
}
}
}
}
if (!matrices->E_M[my_iindx[i] - u])
continue;
if (!matrices->E_M[my_iindx[u + 1] - j])
continue;
dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i] - u] - referenceBPs1[my_iindx[u + 1] - j];
dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i] - u] - referenceBPs2[my_iindx[u + 1] - j];
for (cnt1 = matrices->k_min_M[my_iindx[i] - u];
cnt1 <= matrices->k_max_M[my_iindx[i] - u];
cnt1++) {
for (cnt2 = matrices->l_min_M[my_iindx[i] - u][cnt1];
cnt2 <= matrices->l_max_M[my_iindx[i] - u][cnt1];
cnt2 += 2) {
for (cnt3 = matrices->k_min_M[my_iindx[u + 1] - j];
cnt3 <= matrices->k_max_M[my_iindx[u + 1] - j];
cnt3++) {
for (cnt4 = matrices->l_min_M[my_iindx[u + 1] - j][cnt3];
cnt4 <= matrices->l_max_M[my_iindx[u + 1] - j][cnt3];
cnt4 += 2) {
if ((matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] != INF) && (matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2] != INF)) {
if (((cnt1 + cnt3 + dia) <= maxD1) && ((cnt2 + cnt4 + dib) <= maxD2)) {
matrices->E_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2] = MIN2(matrices->E_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2],
matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2]
+ matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2]
);
updatePosteriorBoundaries(cnt1 + cnt3 + dia,
cnt2 + cnt4 + dib,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
#ifdef COUNT_STATES
matrices->N_M[ij][cnt1 + cnt3 + dia][(cnt2 + cnt4 + dib) / 2] += matrices->N_M[my_iindx[i] - u][cnt1][cnt2 / 2] * matrices->N_M1[my_iindx[u + 1] - j][cnt3][cnt4 / 2];
#endif
}
/* collect all cases where dia+cnt1+cnt3 or dib+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
else {
matrices->E_M_rem[ij] = MIN2(matrices->E_M_rem[ij],
matrices->E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + matrices->E_M[my_iindx[u + 1] - j][cnt3][cnt4 / 2]
);
}
}
}
}
}
}
}
}
/* thats all folks for the multiloop decomposition... */
adjustArrayBoundaries(&matrices->E_M[ij],
&matrices->k_min_M[ij],
&matrices->k_max_M[ij],
&matrices->l_min_M[ij],
&matrices->l_max_M[ij],
min_k_real_m,
max_k_real_m,
min_l_real_m,
max_l_real_m
);
adjustArrayBoundaries(&matrices->E_M1[ij],
&matrices->k_min_M1[ij],
&matrices->k_max_M1[ij],
&matrices->l_min_M1[ij],
&matrices->l_max_M1[ij],
min_k_real_m1,
max_k_real_m1,
min_l_real_m1,
max_l_real_m1
);
#ifdef COUNT_STATES
/* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/
#endif
} /* end of j-loop */
}
/* calculate energies of 5' and 3' fragments */
/* prepare first entries in E_F5 */
for (cnt1 = 1; cnt1 <= turn + 1; cnt1++) {
matrices->E_F5[cnt1] = (int **)vrna_alloc(sizeof(int *));
matrices->E_F5[cnt1][0] = (int *)vrna_alloc(sizeof(int));
matrices->E_F5[cnt1][0][0] = 0;
matrices->E_F5_rem[cnt1] = INF;
matrices->k_min_F5[cnt1] = matrices->k_max_F5[cnt1] = 0;
matrices->l_min_F5[cnt1] = (int *)vrna_alloc(sizeof(int));
matrices->l_max_F5[cnt1] = (int *)vrna_alloc(sizeof(int));
matrices->l_min_F5[cnt1][0] = matrices->l_max_F5[cnt1][0] = 0;
#ifdef COUNT_STATES
matrices->N_F5[cnt1] = (unsigned long **)vrna_alloc(sizeof(unsigned long *));
matrices->N_F5[cnt1][0] = (unsigned long *)vrna_alloc(sizeof(unsigned long));
matrices->N_F5[cnt1][0][0] = 1;
#endif
}
for (j = turn + 2; j <= seq_length; j++) {
unsigned int da = referenceBPs1[my_iindx[1] - j] - referenceBPs1[my_iindx[1] - j + 1];
unsigned int db = referenceBPs2[my_iindx[1] - j] - referenceBPs2[my_iindx[1] - j + 1];
type = ptype[jindx[j] + 1];
additional_en = 0;
if (type) {
if (dangles == 2)
additional_en += vrna_E_ext_stem(type, -1, j < seq_length ? S1[j + 1] : -1, P);
else
additional_en += vrna_E_ext_stem(type, -1, -1, P);
}
/* make min and max k guess for memory allocation */
int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
int *min_l_real, *max_l_real, min_k_real, max_k_real;
min_k_guess = min_l_guess = 0;
max_k_guess = referenceBPs1[my_iindx[1] - j] + mm1[my_iindx[1] - j];
max_l_guess = referenceBPs2[my_iindx[1] - j] + mm2[my_iindx[1] - j];
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[my_iindx[1] - j],
&matrices->k_min_F5[j],
&matrices->k_max_F5[j],
&matrices->l_min_F5[j],
&matrices->l_max_F5[j]
);
preparePosteriorBoundaries(matrices->k_max_F5[j] - matrices->k_min_F5[j] + 1,
matrices->k_min_F5[j],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
prepareArray(&matrices->E_F5[j],
matrices->k_min_F5[j],
matrices->k_max_F5[j],
matrices->l_min_F5[j],
matrices->l_max_F5[j]
);
#ifdef COUNT_STATES
prepareArray2(&matrices->N_F5[j],
matrices->k_min_F5[j],
matrices->k_max_F5[j],
matrices->l_min_F5[j],
matrices->l_max_F5[j]
);
#endif
/* begin the actual computation of 5' end energies */
/* j-1 is unpaired ... */
matrices->E_F5_rem[j] = matrices->E_F5_rem[j - 1];
for (cnt1 = matrices->k_min_F5[j - 1]; cnt1 <= matrices->k_max_F5[j - 1]; cnt1++) {
for (cnt2 = matrices->l_min_F5[j - 1][cnt1]; cnt2 <= matrices->l_max_F5[j - 1][cnt1]; cnt2 += 2) {
if (((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)) {
matrices->E_F5[j][cnt1 + da][(cnt2 + db) / 2] = MIN2(matrices->E_F5[j][cnt1 + da][(cnt2 + db) / 2],
matrices->E_F5[j - 1][cnt1][cnt2 / 2]
);
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_F5[j][cnt1 + da][(cnt2 + db) / 2] += matrices->N_F5[j - 1][cnt1][cnt2 / 2];
#endif
}
/* collect all cases where da+cnt1 or db+cnt2 exceeds maxD1, maxD2, respectively */
else {
matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_F5[j - 1][cnt1][cnt2 / 2]);
}
}
}
/* j pairs with 1 */
if (matrices->E_C_rem[my_iindx[1] - j] != INF)
matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j], matrices->E_C_rem[my_iindx[1] - j] + additional_en);
if (matrices->E_C[my_iindx[1] - j]) {
for (cnt1 = matrices->k_min_C[my_iindx[1] - j]; cnt1 <= matrices->k_max_C[my_iindx[1] - j]; cnt1++)
for (cnt2 = matrices->l_min_C[my_iindx[1] - j][cnt1]; cnt2 <= matrices->l_max_C[my_iindx[1] - j][cnt1]; cnt2 += 2) {
if (matrices->E_C[my_iindx[1] - j][cnt1][cnt2 / 2] != INF) {
matrices->E_F5[j][cnt1][cnt2 / 2] = MIN2(matrices->E_F5[j][cnt1][cnt2 / 2],
matrices->E_C[my_iindx[1] - j][cnt1][cnt2 / 2] + additional_en
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_F5[j][cnt1][cnt2 / 2] += matrices->N_C[my_iindx[1] - j][cnt1][cnt2 / 2];
#endif
}
}
}
/* j pairs with some other nucleotide -> see below */
for (i = j - turn - 1; i > 1; i--) {
ij = my_iindx[i] - j;
type = ptype[jindx[j] + i];
if (type) {
if (dangles == 2)
additional_en = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? S1[j + 1] : -1, P);
else
additional_en = vrna_E_ext_stem(type, -1, -1, P);
if (matrices->E_C_rem[ij] != INF) {
for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++)
for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) {
if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF) {
matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j],
matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C_rem[ij] + additional_en
);
}
}
if (matrices->E_F5_rem[i - 1] != INF) {
matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j],
matrices->E_F5_rem[i - 1] + matrices->E_C_rem[ij] + additional_en
);
}
}
if ((matrices->E_F5_rem[i - 1] != INF) && (matrices->E_C[ij])) {
for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++)
for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2)
if (matrices->E_C[ij][cnt1][cnt2 / 2] != INF) {
matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j],
matrices->E_F5_rem[i - 1] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en
);
}
}
if (!matrices->E_C[ij])
continue;
unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1];
unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1];
for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++)
for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2)
for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++)
for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) {
if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF && matrices->E_C[ij][cnt1][cnt2 / 2] != INF) {
if (((cnt1 + cnt3 + d1a) <= maxD1) && ((cnt2 + cnt4 + d1b) <= maxD2)) {
matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] = MIN2(matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2],
matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
cnt2 + cnt4 + d1b,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] += matrices->N_F5[i - 1][cnt3][cnt4 / 2] * matrices->N_C[ij][cnt1][cnt2 / 2];
#endif
}
/* collect all cases where d1a+cnt1+cnt3 or d1b+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
else {
matrices->E_F5_rem[j] = MIN2(matrices->E_F5_rem[j],
matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en
);
}
}
}
}
}
/* resize and move memory portions of energy matrix E_F5 */
adjustArrayBoundaries(&matrices->E_F5[j],
&matrices->k_min_F5[j],
&matrices->k_max_F5[j],
&matrices->l_min_F5[j],
&matrices->l_max_F5[j],
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
} /* end of j-loop */
if (compute_2Dfold_F3) {
/* prepare first entries in E_F3 */
for (cnt1 = seq_length; cnt1 >= seq_length - turn - 1; cnt1--) {
matrices->E_F3[cnt1] = (int **)vrna_alloc(sizeof(int *));
matrices->E_F3[cnt1][0] = (int *)vrna_alloc(sizeof(int));
matrices->E_F3[cnt1][0][0] = 0;
matrices->k_min_F3[cnt1] = matrices->k_max_F3[cnt1] = 0;
matrices->l_min_F3[cnt1] = (int *)vrna_alloc(sizeof(int));
matrices->l_max_F3[cnt1] = (int *)vrna_alloc(sizeof(int));
matrices->l_min_F3[cnt1][0] = matrices->l_max_F3[cnt1][0] = 0;
}
/* begin calculations */
for (j = seq_length - turn - 2; j >= 1; j--) {
unsigned int da = referenceBPs1[my_iindx[j] - seq_length] - referenceBPs1[my_iindx[j + 1] - seq_length];
unsigned int db = referenceBPs2[my_iindx[j] - seq_length] - referenceBPs2[my_iindx[j + 1] - seq_length];
type = ptype[jindx[seq_length] + j];
additional_en = 0;
if (type) {
if (dangles == 2)
additional_en += vrna_E_ext_stem(type, j > 1 ? S1[j - 1] : -1, -1, P);
else
additional_en += vrna_E_ext_stem(type, -1, -1, P);
}
/* make min and max k guess for memory allocation */
int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
int *min_l_real, *max_l_real, min_k_real, max_k_real;
min_k_guess = min_l_guess = 0;
max_k_guess = referenceBPs1[my_iindx[j] - seq_length] + mm1[my_iindx[j] - seq_length];
max_l_guess = referenceBPs2[my_iindx[j] - seq_length] + mm2[my_iindx[j] - seq_length];
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[my_iindx[j] - seq_length],
&matrices->k_min_F3[j],
&matrices->k_max_F3[j],
&matrices->l_min_F3[j],
&matrices->l_max_F3[j]
);
preparePosteriorBoundaries(matrices->k_max_F3[j] - matrices->k_min_F3[j] + 1,
matrices->k_min_F3[j],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
prepareArray(&matrices->E_F3[j],
matrices->k_min_F3[j],
matrices->k_max_F3[j],
matrices->l_min_F3[j],
matrices->l_max_F3[j]
);
/* begin the actual computation of 5' end energies */
/* j is unpaired ... */
for (cnt1 = matrices->k_min_F3[j + 1]; cnt1 <= matrices->k_max_F3[j + 1]; cnt1++) {
for (cnt2 = matrices->l_min_F3[j + 1][cnt1]; cnt2 <= matrices->l_max_F3[j + 1][cnt1]; cnt2 += 2) {
matrices->E_F3[j][cnt1 + da][(cnt2 + db) / 2] = MIN2(matrices->E_F3[j][cnt1 + da][(cnt2 + db) / 2],
matrices->E_F3[j + 1][cnt1][cnt2 / 2]
);
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
/* j pairs with n */
if (matrices->E_C[my_iindx[j] - seq_length]) {
for (cnt1 = matrices->k_min_C[my_iindx[j] - seq_length]; cnt1 <= matrices->k_max_C[my_iindx[j] - seq_length]; cnt1++)
for (cnt2 = matrices->l_min_C[my_iindx[j] - seq_length][cnt1]; cnt2 <= matrices->l_max_C[my_iindx[j] - seq_length][cnt1]; cnt2 += 2) {
if (matrices->E_C[my_iindx[j] - seq_length][cnt1][cnt2 / 2] != INF) {
matrices->E_F3[j][cnt1][cnt2 / 2] = MIN2(matrices->E_F3[j][cnt1][cnt2 / 2],
matrices->E_C[my_iindx[j] - seq_length][cnt1][cnt2 / 2] + additional_en
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
}
/* j pairs with some other nucleotide -> see below */
for (i = j - turn - 1; i > 1; i--) {
ij = my_iindx[i] - j;
if (!matrices->E_C[ij])
continue;
type = ptype[jindx[j] + i];
if (type) {
unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1];
unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1];
if (dangles == 2)
additional_en = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? S1[j + 1] : -1, P);
else
additional_en = vrna_E_ext_stem(type, -1, -1, P);
for (cnt1 = matrices->k_min_C[ij]; cnt1 <= matrices->k_max_C[ij]; cnt1++)
for (cnt2 = matrices->l_min_C[ij][cnt1]; cnt2 <= matrices->l_max_C[ij][cnt1]; cnt2 += 2)
for (cnt3 = matrices->k_min_F5[i - 1]; cnt3 <= matrices->k_max_F5[i - 1]; cnt3++)
for (cnt4 = matrices->l_min_F5[i - 1][cnt3]; cnt4 <= matrices->l_max_F5[i - 1][cnt3]; cnt4 += 2) {
if (matrices->E_F5[i - 1][cnt3][cnt4 / 2] != INF && matrices->E_C[ij][cnt1][cnt2 / 2] != INF) {
matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] = MIN2(matrices->E_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2],
matrices->E_F5[i - 1][cnt3][cnt4 / 2] + matrices->E_C[ij][cnt1][cnt2 / 2] + additional_en
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
cnt2 + cnt4 + d1b,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
matrices->N_F5[j][cnt1 + cnt3 + d1a][(cnt2 + cnt4 + d1b) / 2] += matrices->N_F5[i - 1][cnt3][cnt4 / 2] * matrices->N_C[ij][cnt1][cnt2 / 2];
#endif
}
}
}
}
/* resize and move memory portions of energy matrix E_F5 */
adjustArrayBoundaries(&matrices->E_F5[j],
&matrices->k_min_F5[j],
&matrices->k_max_F5[j],
&matrices->l_min_F5[j],
&matrices->l_max_F5[j],
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
} /* end of j-loop */
}
}
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*
 * Recursively backtrack one optimal structure for the 5' fragment [1..j]
 * from the 2D distance-class MFE matrices E_F5 / E_C.
 *
 * (k, l) select the base-pair-distance class of the solution relative to
 * the two reference structures (distances counted via referenceBPs1 /
 * referenceBPs2).  The sentinel k == -1 selects the "remainder" entry
 * E_F5_rem[j], i.e. solutions whose distance to a reference exceeds
 * maxD1 and/or maxD2.
 *
 * Base pairs found along the way are written into 'structure' by the
 * recursive calls to backtrack_c(); 'vc' provides all matrices, the
 * pair-type table and the energy parameters.  If no decomposition of
 * E_F5[j] (resp. E_F5_rem[j]) can be re-discovered, the function aborts
 * via vrna_message_error().
 */
PRIVATE void
backtrack_f5(unsigned int j,
             int k,
             int l,
             char *structure,
             vrna_fold_compound_t *vc)
{
  int *my_iindx, *jindx, energy, type, dangles, cnt1, cnt2, cnt3, cnt4, turn;
  int **l_min_C, **l_max_C, **l_min_F5, **l_max_F5;
  int *k_min_C, *k_max_C, *k_min_F5, *k_max_F5;
  int ***E_C, ***E_F5;
  int *E_C_rem, *E_F5_rem;
  unsigned int i, ij, seq_length, maxD1, maxD2;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;
  unsigned int da, db;

  /* unpack model details, matrices and index helpers from the fold compound */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles = md->dangles;
  turn = md->min_loop_size;
  E_F5 = matrices->E_F5;
  l_min_F5 = matrices->l_min_F5;
  l_max_F5 = matrices->l_max_F5;
  k_min_F5 = matrices->k_min_F5;
  k_max_F5 = matrices->k_max_F5;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_F5_rem = matrices->E_F5_rem;
  E_C_rem = matrices->E_C_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  /* change in distance to reference 1 / reference 2 when nucleotide j
   * is left unpaired, i.e. #reference pairs in [1..j] not in [1..j-1] */
  da = referenceBPs1[my_iindx[1] - j] - referenceBPs1[my_iindx[1] - j + 1];
  db = referenceBPs2[my_iindx[1] - j] - referenceBPs2[my_iindx[1] - j + 1];

  /* fragment too short to contain any base pair -> nothing to backtrack */
  if (j < turn + 2)
    return;

  /* F5[j] == F5[j-1] ? */
  /* case 1: j is unpaired, solution continues in F5[j-1] */
  if (k == -1) {
    /* remainder class: either stay in the remainder of F5[j-1], or a
     * regular class of F5[j-1] spills over into the remainder once the
     * distance increments da/db are added */
    if (E_F5_rem[j] == INF) {
      return;
    } else if (E_F5_rem[j] == E_F5_rem[j - 1]) {
      backtrack_f5(j - 1, k, l, structure, vc);
      return;
    } else if (E_F5[j - 1]) {
      for (cnt1 = k_min_F5[j - 1];
           cnt1 <= k_max_F5[j - 1];
           cnt1++) {
        for (cnt2 = l_min_F5[j - 1][cnt1];
             cnt2 <= l_max_F5[j - 1][cnt1];
             cnt2 += 2) {
          /* only classes that exceed maxD1/maxD2 after the shift feed the remainder */
          if (((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)) {
            if (E_F5_rem[j] == E_F5[j - 1][cnt1][cnt2 / 2]) {
              backtrack_f5(j - 1, cnt1, cnt2, structure, vc);
              return;
            }
          }
        }
      }
    }
  } else if ((k >= da) && (l >= db)) {
    /* regular class: the predecessor class is (k-da, l-db) in F5[j-1]
     * NOTE(review): k,l are int while da,db are unsigned, so the comparison
     * is performed unsigned; harmless here since k,l >= 0 in this branch */
    if (E_F5[j - 1]) {
      if ((k - da >= k_min_F5[j - 1]) && (k - da <= k_max_F5[j - 1])) {
        if ((l - db >= l_min_F5[j - 1][k - da]) && (l - db <= l_max_F5[j - 1][k - da])) {
          if (E_F5[j - 1][k - da][(l - db) / 2] == E_F5[j][k][l / 2]) {
            backtrack_f5(j - 1, k - da, l - db, structure, vc);
            return;
          }
        }
      }
    }
  }

  /* case 2: j pairs with nucleotide 1, i.e. F5[j] = C(1,j) + ext. stem bonus */
  type = ptype[jindx[j] + 1];
  if (type) {
    /* exterior-loop stem energy; dangles==2 adds the 3' dangle of j (if any) */
    if (dangles == 2)
      energy = vrna_E_ext_stem(type, -1, j < seq_length ? S1[j + 1] : -1, P);
    else
      energy = vrna_E_ext_stem(type, -1, -1, P);
    if (k == -1) {
      if (E_C_rem[my_iindx[1] - j] + energy == E_F5_rem[j]) {
        backtrack_c(1, j, -1, -1, structure, vc);
        return;
      }
    } else if (k >= k_min_C[my_iindx[1] - j] && (k <= k_max_C[my_iindx[1] - j])) {
      if ((l >= l_min_C[my_iindx[1] - j][k]) && (l <= l_max_C[my_iindx[1] - j][k])) {
        if (E_C[my_iindx[1] - j][k][l / 2] + energy == E_F5[j][k][l / 2]) {
          backtrack_c(1, j, k, l, structure, vc);
          return;
        }
      }
    }
  }

  /* case 3: j pairs with some i in (1, j-turn-1); F5[j] = F5[i-1] + C(i,j) */
  for (i = j - turn - 1; i > 1; i--) {
    ij = my_iindx[i] - j;
    type = ptype[jindx[j] + i];
    if (type) {
      /* distance increments caused by splitting [1..j] into [1..i-1] + (i,j) */
      unsigned int d1a = referenceBPs1[my_iindx[1] - j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1] - i + 1];
      unsigned int d1b = referenceBPs2[my_iindx[1] - j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1] - i + 1];
      if (dangles == 2)
        energy = vrna_E_ext_stem(type, S1[i - 1], j < seq_length ? S1[j + 1] : -1, P);
      else
        energy = vrna_E_ext_stem(type, -1, -1, P);
      if (k == -1) {
        /* remainder target: try (regular F5) x (rem C), (rem F5) x (rem C),
         * (rem F5) x (regular C), and finally regular x regular combinations
         * whose summed class overflows maxD1/maxD2 */
        if (E_C_rem[ij] != INF) {
          for (cnt1 = k_min_F5[i - 1];
               cnt1 <= k_max_F5[i - 1];
               cnt1++) {
            for (cnt2 = l_min_F5[i - 1][cnt1];
                 cnt2 <= l_max_F5[i - 1][cnt1];
                 cnt2 += 2) {
              if (E_F5_rem[j] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C_rem[ij] + energy)) {
                backtrack_f5(i - 1, cnt1, cnt2, structure, vc);
                backtrack_c(i, j, -1, -1, structure, vc);
                return;
              }
            }
          }
          if (E_F5_rem[j] == (E_F5_rem[i - 1] + E_C_rem[ij] + energy)) {
            backtrack_f5(i - 1, -1, -1, structure, vc);
            backtrack_c(i, j, -1, -1, structure, vc);
            return;
          }
        }
        if (E_F5_rem[i - 1] != INF) {
          for (cnt1 = k_min_C[ij];
               cnt1 <= k_max_C[ij];
               cnt1++) {
            for (cnt2 = l_min_C[ij][cnt1];
                 cnt2 <= l_max_C[ij][cnt1];
                 cnt2 += 2) {
              if (E_F5_rem[j] == (E_F5_rem[i - 1] + E_C[ij][cnt1][cnt2 / 2] + energy)) {
                backtrack_f5(i - 1, -1, -1, structure, vc);
                backtrack_c(i, j, cnt1, cnt2, structure, vc);
                return;
              }
            }
          }
        }
        /* regular x regular pairs that exceed the distance bounds */
        for (cnt1 = k_min_F5[i - 1];
             cnt1 <= k_max_F5[i - 1];
             cnt1++)
          for (cnt2 = l_min_F5[i - 1][cnt1];
               cnt2 <= l_max_F5[i - 1][cnt1];
               cnt2 += 2)
            for (cnt3 = k_min_C[ij];
                 cnt3 <= k_max_C[ij];
                 cnt3++)
              for (cnt4 = l_min_C[ij][cnt3];
                   cnt4 <= l_max_C[ij][cnt3];
                   cnt4 += 2) {
                if (((cnt1 + cnt3 + d1a) > maxD1) || ((cnt2 + cnt4 + d1b) > maxD2)) {
                  if (E_F5_rem[j] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C[ij][cnt3][cnt4 / 2] + energy)) {
                    backtrack_f5(i - 1, cnt1, cnt2, structure, vc);
                    backtrack_c(i, j, cnt3, cnt4, structure, vc);
                    return;
                  }
                }
              }
      } else if ((k >= d1a) && (l >= d1b)) {
        /* regular target class: enumerate F5[i-1] classes and derive the
         * matching C(i,j) class by subtraction (k_c, l_c) */
        int k_f_max = MIN2(k - d1a, k_max_F5[i - 1]);
        for (cnt1 = k_min_F5[i - 1]; cnt1 <= k_f_max; cnt1++) {
          int l_f_max = MIN2(l - d1b, l_max_F5[i - 1][cnt1]);
          for (cnt2 = l_min_F5[i - 1][cnt1]; cnt2 <= l_f_max; cnt2 += 2) {
            int k_c = k - d1a - cnt1;
            if ((k_c >= k_min_C[ij]) && (k_c <= k_max_C[ij])) {
              int l_c = l - d1b - cnt2;
              if ((l_c >= l_min_C[ij][k_c]) && (l_c <= l_max_C[ij][k_c])) {
                if (E_F5[j][k][l / 2] == (E_F5[i - 1][cnt1][cnt2 / 2] + E_C[ij][k_c][l_c / 2] + energy)) {
                  backtrack_f5(i - 1, cnt1, cnt2, structure, vc);
                  backtrack_c(i, j, k_c, l_c, structure, vc);
                  return;
                }
              }
            }
          }
        }
      }
    }
  }
  /* no decomposition reproduced the stored energy -> inconsistent matrices */
  vrna_message_error("backtracking failed in f5");
}
PRIVATE void
backtrack_c(unsigned int i,
unsigned int j,
int k,
int l,
char *structure,
vrna_fold_compound_t *vc)
{
unsigned int p, q, pq, ij, maxp, maxD1, maxD2;
int *my_iindx, *jindx, type, type_2, energy, no_close, dangles, base_d1, base_d2, d1, d2, cnt1, cnt2, cnt3, cnt4, *rtype, turn;
int **l_min_C, **l_max_C, **l_min_M, **l_max_M, **l_min_M1, **l_max_M1;
int *k_min_C, *k_max_C, *k_min_M, *k_max_M, *k_min_M1, *k_max_M1;
int ***E_C, ***E_M, ***E_M1, *E_C_rem, *E_M_rem, *E_M1_rem;
short *S1;
unsigned int *referenceBPs1, *referenceBPs2;
char *ptype, *sequence;
vrna_param_t *P;
vrna_md_t *md;
vrna_mx_mfe_t *matrices;
P = vc->params;
md = &(P->model_details);
matrices = vc->matrices;
sequence = vc->sequence;
S1 = vc->sequence_encoding;
ptype = vc->ptype;
rtype = &(md->rtype[0]);
my_iindx = vc->iindx;
jindx = vc->jindx;
referenceBPs1 = vc->referenceBPs1;
referenceBPs2 = vc->referenceBPs2;
dangles = md->dangles;
turn = md->min_loop_size;
E_C = matrices->E_C;
l_min_C = matrices->l_min_C;
l_max_C = matrices->l_max_C;
k_min_C = matrices->k_min_C;
k_max_C = matrices->k_max_C;
E_M = matrices->E_M;
l_min_M = matrices->l_min_M;
l_max_M = matrices->l_max_M;
k_min_M = matrices->k_min_M;
k_max_M = matrices->k_max_M;
E_M1 = matrices->E_M1;
l_min_M1 = matrices->l_min_M1;
l_max_M1 = matrices->l_max_M1;
k_min_M1 = matrices->k_min_M1;
k_max_M1 = matrices->k_max_M1;
E_C_rem = matrices->E_C_rem;
E_M_rem = matrices->E_M_rem;
E_M1_rem = matrices->E_M1_rem;
maxD1 = vc->maxD1;
maxD2 = vc->maxD2;
ij = my_iindx[i] - j;
int e = (k == -1) ? E_C_rem[ij] : E_C[ij][k][l / 2];
type = ptype[jindx[j] + i];
no_close = (((type == 3) || (type == 4)) && no_closingGU);
structure[i - 1] = '(';
structure[j - 1] = ')';
base_d1 = ((unsigned int)vc->reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)vc->reference_pt2[i] != j) ? 1 : -1;
base_d1 += referenceBPs1[ij];
base_d2 += referenceBPs2[ij];
if (k == -1) {
if (((unsigned int)base_d1 > maxD1) || ((unsigned int)base_d2 > maxD2))
if (e == E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], sequence + i - 1, P))
return;
} else {
if ((unsigned int)base_d1 == k)
if ((unsigned int)base_d2 == l)
if (E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], sequence + i - 1, P) == e)
return;
}
maxp = MIN2(j - 2 - turn, i + MAXLOOP + 1);
for (p = i + 1; p <= maxp; p++) {
unsigned int minq, ln_pre;
minq = p + turn + 1;
ln_pre = j - i - 1;
if (ln_pre > minq + MAXLOOP)
minq = ln_pre - MAXLOOP - 1;
for (q = minq; q < j; q++) {
pq = my_iindx[p] - q;
type_2 = ptype[jindx[q] + p];
if (type_2 == 0)
continue;
type_2 = rtype[type_2];
/* d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */
d1 = base_d1 - referenceBPs1[pq];
d2 = base_d2 - referenceBPs2[pq];
energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P);
if (k == -1) {
if (E_C_rem[pq] != INF) {
if (e == (E_C_rem[pq] + energy)) {
backtrack_c(p, q, -1, -1, structure, vc);
return;
}
}
if (E_C[pq]) {
for (cnt1 = k_min_C[pq];
cnt1 <= k_max_C[pq];
cnt1++)
for (cnt2 = l_min_C[pq][cnt1];
cnt2 <= l_max_C[pq][cnt1];
cnt2 += 2) {
if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
if (e == (E_C[pq][cnt1][cnt2 / 2] + energy)) {
backtrack_c(p, q, cnt1, cnt2, structure, vc);
return;
}
}
}
}
} else {
if (!E_C[pq])
continue;
if (d1 <= k && d2 <= l) {
if ((k - d1 >= k_min_C[pq]) && (k - d1) <= k_max_C[pq]) {
if ((l - d2 >= l_min_C[pq][k - d1]) && (l - d2 <= l_max_C[pq][k - d1])) {
if (E_C[pq][k - d1][(l - d2) / 2] + energy == e) {
backtrack_c(p, q, k - d1, l - d2, structure, vc);
return;
}
}
}
}
}
} /* end q-loop */
} /* end p-loop */
/* multi-loop decomposition ------------------------*/
if (!no_close) {
unsigned int u;
int tt;
if (k == -1) {
for (u = i + turn + 2; u < j - turn - 2; u++) {
int i1u, u1j1;
i1u = my_iindx[i + 1] - u;
u1j1 = my_iindx[u + 1] - j + 1;
tt = rtype[type];
energy = P->MLclosing;
if (dangles == 2)
energy += E_MLstem(tt, S1[j - 1], S1[i + 1], P);
else
energy += E_MLstem(tt, -1, -1, P);
if (E_M_rem[i1u] != INF) {
if (E_M1[u1j1]) {
for (cnt1 = k_min_M1[u1j1];
cnt1 <= k_max_M1[u1j1];
cnt1++)
for (cnt2 = l_min_M1[u1j1][cnt1];
cnt2 <= l_max_M1[u1j1][cnt1];
cnt2 += 2) {
if (e == (E_M_rem[i1u] + E_M1[u1j1][cnt1][cnt2 / 2] + energy)) {
backtrack_m(i + 1, u, -1, -1, structure, vc);
backtrack_m1(u + 1, j - 1, cnt1, cnt2, structure, vc);
return;
}
}
}
if (E_M1_rem[u1j1] != INF) {
if (e == (E_M_rem[i1u] + E_M1_rem[u1j1] + energy)) {
backtrack_m(i + 1, u, -1, -1, structure, vc);
backtrack_m1(u + 1, j - 1, -1, -1, structure, vc);
return;
}
}
}
if (E_M1_rem[u1j1] != INF) {
if (E_M[i1u]) {
for (cnt1 = k_min_M[i1u];
cnt1 <= k_max_M[i1u];
cnt1++)
for (cnt2 = l_min_M[i1u][cnt1];
cnt2 <= l_max_M[i1u][cnt1];
cnt2 += 2)
if (e == (E_M[i1u][cnt1][cnt2 / 2] + E_M1_rem[u1j1] + energy)) {
backtrack_m(i + 1, u, cnt1, cnt2, structure, vc);
backtrack_m1(u + 1, j - 1, -1, -1, structure, vc);
return;
}
}
}
/* now all cases where we exceed the maxD1/D2 scope by combination of E_M and E_M1 */
if (!E_M[i1u])
continue;
if (!E_M1[u1j1])
continue;
/* get distance to reference if closing this multiloop
* dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1})
*/
d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
for (cnt1 = matrices->k_min_M[i1u];
cnt1 <= matrices->k_max_M[i1u];
cnt1++)
for (cnt2 = matrices->l_min_M[i1u][cnt1];
cnt2 <= matrices->l_max_M[i1u][cnt1];
cnt2 += 2)
for (cnt3 = matrices->k_min_M1[u1j1];
cnt3 <= matrices->k_max_M1[u1j1];
cnt3++)
for (cnt4 = matrices->l_min_M1[u1j1][cnt3];
cnt4 <= matrices->l_max_M1[u1j1][cnt3];
cnt4 += 2) {
if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
if (e == (E_M[i1u][cnt1][cnt2 / 2] + E_M1[u1j1][cnt3][cnt4 / 2] + energy)) {
backtrack_m(i + 1, u, cnt1, cnt2, structure, vc);
backtrack_m1(u + 1, j - 1, cnt3, cnt4, structure, vc);
return;
}
}
}
}
} else {
for (u = i + turn + 2; u < j - turn - 2; u++) {
int i1u, u1j1;
i1u = my_iindx[i + 1] - u;
u1j1 = my_iindx[u + 1] - j + 1;
if (!E_M[i1u])
continue;
if (!E_M1[u1j1])
continue;
/* get distance to reference if closing this multiloop
* dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1})
*/
d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
tt = rtype[type];
energy = P->MLclosing;
if (dangles == 2)
energy += E_MLstem(tt, S1[j - 1], S1[i + 1], P);
else
energy += E_MLstem(tt, -1, -1, P);
if ((d1 <= k) && (d2 <= l)) {
for (cnt1 = k_min_M[i1u];
cnt1 <= MIN2(k - d1, k_max_M[i1u]);
cnt1++)
for (cnt2 = l_min_M[i1u][cnt1];
cnt2 <= MIN2(l - d2, l_max_M[i1u][cnt1]);
cnt2 += 2)
if (((k - d1 - cnt1) >= k_min_M1[u1j1])
&& ((k - d1 - cnt1) <= k_max_M1[u1j1])) {
if (((l - d2 - cnt2) >= l_min_M1[u1j1][k - d1 - cnt1])
&& ((l - d2 - cnt2) <= l_max_M1[u1j1][k - d1 - cnt1])) {
if (e == (energy + E_M[i1u][cnt1][cnt2 / 2] + E_M1[u1j1][k - d1 - cnt1][(l - d2 - cnt2) / 2])) {
backtrack_m(i + 1, u, cnt1, cnt2, structure, vc);
backtrack_m1(u + 1, j - 1, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
return;
}
}
}
}
}
}
}
vrna_message_error("backtracking failed in c");
}
/*
 * Backtrack an entry of the multiloop matrix M for subsequence [i,j].
 *
 * (k,l) address a distance class: k and l are the base-pair distances of
 * the sought structure to the two reference structures.  k == -1 selects
 * the "remainder" class E_M_rem[ij], which collects all states whose
 * distance exceeds maxD1/maxD2.  The routine re-evaluates every recurrence
 * case that may have produced the stored energy and recurses into the
 * first matching one, writing brackets into `structure` along the way.
 * Aborts via vrna_message_error() if no case reproduces the energy.
 */
PRIVATE void
backtrack_m(unsigned int i,
            unsigned int j,
            int k,
            int l,
            char *structure,
            vrna_fold_compound_t *vc)
{
  unsigned int u, ij, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int *my_iindx, *jindx, type, energy, dangles, circ, cnt1, cnt2, cnt3, cnt4, turn;
  int **l_min_C, **l_max_C, **l_min_M, **l_max_M;
  int *k_min_C, *k_max_C, *k_min_M, *k_max_M;
  int ***E_C, ***E_M, *E_C_rem, *E_M_rem;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  circ = md->circ;
  ptype = vc->ptype;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles = md->dangles;
  turn = md->min_loop_size;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M = matrices->E_M;
  l_min_M = matrices->l_min_M;
  l_max_M = matrices->l_max_M;
  k_min_M = matrices->k_min_M;
  k_max_M = matrices->k_max_M;
  E_C_rem = matrices->E_C_rem;
  E_M_rem = matrices->E_M_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  ij = my_iindx[i] - j;

  /* the energy value one of the recurrence cases below must reproduce */
  int e = (k == -1) ? E_M_rem[ij] : E_M[ij][k][l / 2];

  /* base-pair distances of [i,j] to both references; the d1/d2 deltas
   * computed below are always relative to these */
  base_d1 = referenceBPs1[ij];
  base_d2 = referenceBPs2[ij];

  if (k == -1) {
    /* new_fML = ML(i+1,j)+c */
    d1 = base_d1 - referenceBPs1[my_iindx[i + 1] - j];
    d2 = base_d2 - referenceBPs2[my_iindx[i + 1] - j];
    if (E_M_rem[my_iindx[i + 1] - j] != INF) {
      if (e == (E_M_rem[my_iindx[i + 1] - j] + P->MLbase)) {
        backtrack_m(i + 1, j, -1, -1, structure, vc);
        return;
      }
    }

    if (E_M[my_iindx[i + 1] - j]) {
      for (cnt1 = k_min_M[my_iindx[i + 1] - j];
           cnt1 <= k_max_M[my_iindx[i + 1] - j];
           cnt1++)
        for (cnt2 = l_min_M[my_iindx[i + 1] - j][cnt1];
             cnt2 <= l_max_M[my_iindx[i + 1] - j][cnt1];
             cnt2 += 2)
          /* only states pushed out of the distance window belong to rem */
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e == (E_M[my_iindx[i + 1] - j][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m(i + 1, j, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }

    /* new_fML = min(ML(i,j-1) + c, new_fML) */
    d1 = base_d1 - referenceBPs1[ij + 1];
    d2 = base_d2 - referenceBPs2[ij + 1];
    if (E_M_rem[ij + 1] != INF) {
      if (e == (E_M_rem[ij + 1] + P->MLbase)) {
        backtrack_m(i, j - 1, -1, -1, structure, vc);
        return;
      }
    }

    if (E_M[ij + 1]) {
      for (cnt1 = k_min_M[ij + 1];
           cnt1 <= k_max_M[ij + 1];
           cnt1++)
        for (cnt2 = l_min_M[ij + 1][cnt1];
             cnt2 <= l_max_M[ij + 1][cnt1];
             cnt2 += 2)
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e == (E_M[ij + 1][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m(i, j - 1, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }

    /* new_fML = min(new_fML, C(i,j)+b) */
    if (E_C_rem[ij] != INF) {
      type = ptype[jindx[j] + i];

      if (dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      if (e == (E_C_rem[ij] + energy)) {
        backtrack_c(i, j, -1, -1, structure, vc);
        return;
      }
    }

    /* modular decomposition: fML(i,j) = fML(i,u) + C(u+1,j) ----------*/
    for (u = i + 1 + turn; u <= j - 2 - turn; u++) {
      int iu, uj;
      iu = my_iindx[i] - u;
      uj = my_iindx[u + 1] - j;
      type = ptype[jindx[j] + u + 1];

      d1 = base_d1 - referenceBPs1[iu] - referenceBPs1[uj];
      d2 = base_d2 - referenceBPs2[iu] - referenceBPs2[uj];

      if (dangles == 2)
        energy = E_MLstem(type, S1[u], (j < seq_length) || circ ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      /* remainder M(i,u) paired with regular or remainder C(u+1,j) */
      if (E_M_rem[iu] != INF) {
        if (E_C[uj]) {
          for (cnt1 = k_min_C[uj];
               cnt1 <= k_max_C[uj];
               cnt1++)
            for (cnt2 = l_min_C[uj][cnt1];
                 cnt2 <= l_max_C[uj][cnt1];
                 cnt2 += 2)
              if (e == (E_M_rem[iu] + E_C[uj][cnt1][cnt2 / 2] + energy)) {
                backtrack_m(i, u, -1, -1, structure, vc);
                backtrack_c(u + 1, j, cnt1, cnt2, structure, vc);
                return;
              }
        }

        if (E_C_rem[uj] != INF) {
          if (e == (E_M_rem[iu] + E_C_rem[uj] + energy)) {
            backtrack_m(i, u, -1, -1, structure, vc);
            backtrack_c(u + 1, j, -1, -1, structure, vc);
            return;
          }
        }
      }

      /* regular M(i,u) paired with remainder C(u+1,j) */
      if (E_C_rem[uj] != INF) {
        if (E_M[iu]) {
          for (cnt1 = k_min_M[iu];
               cnt1 <= k_max_M[iu];
               cnt1++)
            for (cnt2 = l_min_M[iu][cnt1];
                 cnt2 <= l_max_M[iu][cnt1];
                 cnt2 += 2)
              if (e == (E_M[iu][cnt1][cnt2 / 2] + E_C_rem[uj] + energy)) {
                backtrack_m(i, u, cnt1, cnt2, structure, vc);
                backtrack_c(u + 1, j, -1, -1, structure, vc);
                return;
              }
        }
      }

      if (!E_M[iu])
        continue;

      if (!E_C[uj])
        continue;

      /* both parts regular, but the combined distance leaves the window */
      for (cnt1 = k_min_M[iu];
           cnt1 <= k_max_M[iu];
           cnt1++)
        for (cnt2 = l_min_M[iu][cnt1];
             cnt2 <= l_max_M[iu][cnt1];
             cnt2 += 2)
          for (cnt3 = k_min_C[uj];
               cnt3 <= k_max_C[uj];
               cnt3++) {
            for (cnt4 = l_min_C[uj][cnt3];
                 cnt4 <= l_max_C[uj][cnt3];
                 cnt4 += 2)
              if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                if (e == (E_M[iu][cnt1][cnt2 / 2] + E_C[uj][cnt3][cnt4 / 2] + energy)) {
                  backtrack_m(i, u, cnt1, cnt2, structure, vc);
                  backtrack_c(u + 1, j, cnt3, cnt4, structure, vc);
                  return;
                }
              }
          }
    }
  } /* end if (k == -1) */
  else {
    d1 = base_d1 - referenceBPs1[my_iindx[i + 1] - j];
    d2 = base_d2 - referenceBPs2[my_iindx[i + 1] - j];

    /* new_fML = ML(i+1,j)+c
     * NOTE(review): unlike the ij+1 case below, E_M[my_iindx[i+1]-j] is
     * dereferenced without a NULL check here -- presumably the slot is
     * always allocated whenever the bounds match; confirm */
    if (d1 <= k && d2 <= l) {
      if ((k - d1 >= k_min_M[my_iindx[i + 1] - j]) && (k - d1 <= k_max_M[my_iindx[i + 1] - j])) {
        if ((l - d2 >= l_min_M[my_iindx[i + 1] - j][k - d1]) && (l - d2 <= l_max_M[my_iindx[i + 1] - j][k - d1])) {
          if (E_M[my_iindx[i + 1] - j][k - d1][(l - d2) / 2] + P->MLbase == e) {
            backtrack_m(i + 1, j, k - d1, l - d2, structure, vc);
            return;
          }
        }
      }
    }

    d1 = base_d1 - referenceBPs1[ij + 1];
    d2 = base_d2 - referenceBPs2[ij + 1];

    /* new_fML = min(ML(i,j-1) + c, new_fML) */
    if (E_M[ij + 1]) {
      if (d1 <= k && d2 <= l) {
        if ((k - d1 >= k_min_M[ij + 1]) && (k - d1 <= k_max_M[ij + 1])) {
          if ((l - d2 >= l_min_M[ij + 1][k - d1]) && (l - d2 <= l_max_M[ij + 1][k - d1])) {
            if (E_M[ij + 1][k - d1][(l - d2) / 2] + P->MLbase == e) {
              backtrack_m(i, j - 1, k - d1, l - d2, structure, vc);
              return;
            }
          }
        }
      }
    }

    /* new_fML = min(new_fML, C(i,j)+b) */
    if (E_C[ij]) {
      type = ptype[jindx[j] + i];

      if (dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i - 1] : -1, ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      if ((k >= k_min_C[ij]) && (k <= k_max_C[ij])) {
        if ((l >= l_min_C[ij][k]) && (l <= l_max_C[ij][k])) {
          if (E_C[ij][k][l / 2] + energy == e) {
            backtrack_c(i, j, k, l, structure, vc);
            return;
          }
        }
      }
    }

    /* modular decomposition -------------------------------*/
    for (u = i + 1 + turn; u <= j - 2 - turn; u++) {
      if (!E_M[my_iindx[i] - u])
        continue;

      if (!E_C[my_iindx[u + 1] - j])
        continue;

      type = ptype[jindx[j] + u + 1];
      d1 = base_d1 - referenceBPs1[my_iindx[i] - u] - referenceBPs1[my_iindx[u + 1] - j];
      d2 = base_d2 - referenceBPs2[my_iindx[i] - u] - referenceBPs2[my_iindx[u + 1] - j];

      if (dangles == 2)
        energy = E_MLstem(type, S1[u], ((j < seq_length) || circ) ? S1[j + 1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);

      /* split the remaining distance budget (k-d1, l-d2) over both parts */
      if (d1 <= k && d2 <= l) {
        for (cnt1 = k_min_M[my_iindx[i] - u]; cnt1 <= MIN2(k - d1, k_max_M[my_iindx[i] - u]); cnt1++)
          for (cnt2 = l_min_M[my_iindx[i] - u][cnt1]; cnt2 <= MIN2(l - d2, l_max_M[my_iindx[i] - u][cnt1]); cnt2 += 2)
            if ((k - d1 - cnt1 >= k_min_C[my_iindx[u + 1] - j]) && (k - d1 - cnt1 <= k_max_C[my_iindx[u + 1] - j])) {
              if ((l - d2 - cnt2 >= l_min_C[my_iindx[u + 1] - j][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_C[my_iindx[u + 1] - j][k - d1 - cnt1])) {
                if (E_M[my_iindx[i] - u][cnt1][cnt2 / 2] + E_C[my_iindx[u + 1] - j][k - d1 - cnt1][(l - d2 - cnt2) / 2] + energy == e) {
                  backtrack_m(i, u, cnt1, cnt2, structure, vc);
                  backtrack_c(u + 1, j, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                  return;
                }
              }
            }
      }
    }
  }
  vrna_message_error("backtracking failed in fML\n");
}
/*
 * Backtrack an entry of the M1 matrix (multiloop component with exactly one
 * stem, the stem starting at position i) for subsequence [i,j].
 *
 * (k,l) address a distance class to the two reference structures; k == -1
 * selects the "remainder" class E_M1_rem[ij] of states whose distances
 * exceed maxD1/maxD2.  The two recurrence cases are
 *   M1(i,j) = C(i,j)   + MLstem
 *   M1(i,j) = M1(i,j-1) + MLbase
 * and the matching one is re-entered via backtrack_c()/backtrack_m1().
 * Aborts via vrna_message_error() if no case reproduces the stored energy.
 *
 * Fixes vs. previous revision:
 *  - the k == -1 scan over E_M1[ij+1] distance classes now checks
 *    E_M1[ij+1] for NULL first, matching every sibling loop in this file
 *    (previously l_min_M1[ij+1] could be dereferenced while NULL);
 *  - typo in the error message ("backtack").
 */
PRIVATE void
backtrack_m1(unsigned int i,
             unsigned int j,
             int k,
             int l,
             char *structure,
             vrna_fold_compound_t *vc)
{
  unsigned int ij, seq_length, d1, d2, *referenceBPs1, *referenceBPs2, maxD1, maxD2;
  int *my_iindx, *jindx, **l_min_C, **l_max_C, **l_min_M1, **l_max_M1;
  int *k_min_C, *k_max_C, *k_min_M1, *k_max_M1, cnt1, cnt2;
  int ***E_C, ***E_M1, *E_C_rem, *E_M1_rem, type, dangles, circ, energy, e_m1;
  short *S1;
  char *ptype;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  circ = md->circ;
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  dangles = md->dangles;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M1 = matrices->E_M1;
  l_min_M1 = matrices->l_min_M1;
  l_max_M1 = matrices->l_max_M1;
  k_min_M1 = matrices->k_min_M1;
  k_max_M1 = matrices->k_max_M1;
  E_C_rem = matrices->E_C_rem;
  E_M1_rem = matrices->E_M1_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  ij = my_iindx[i] - j;

  /* the energy value one of the cases below must reproduce */
  e_m1 = (k == -1) ? E_M1_rem[ij] : E_M1[ij][k][l / 2];

  type = ptype[jindx[j] + i];

  /* distance delta of shortening [i,j] to [i,j-1] */
  d1 = referenceBPs1[ij] - referenceBPs1[ij + 1];
  d2 = referenceBPs2[ij] - referenceBPs2[ij + 1];

  if (dangles == 2)
    energy = E_MLstem(type, (i > 1) || circ ? S1[i - 1] : -1, (j < seq_length) || circ ? S1[j + 1] : -1, P);
  else
    energy = E_MLstem(type, -1, -1, P);

  if (k == -1) {
    /* M1(i,j) = C(i,j) + MLstem, state outside the distance window */
    if (E_C_rem[ij] != INF) {
      if (e_m1 == (E_C_rem[ij] + energy)) {
        backtrack_c(i, j, -1, -1, structure, vc);
        return;
      }
    }

    /* M1(i,j) = M1(i,j-1) + MLbase, predecessor also outside the window */
    if (E_M1_rem[ij + 1] != INF) {
      if (e_m1 == (E_M1_rem[ij + 1] + P->MLbase)) {
        backtrack_m1(i, j - 1, -1, -1, structure, vc);
        return;
      }
    }

    /* regular predecessor whose shifted distance leaves the window;
     * the NULL check guards subsequences without any in-window state */
    if (E_M1[ij + 1]) {
      for (cnt1 = k_min_M1[ij + 1];
           cnt1 <= k_max_M1[ij + 1];
           cnt1++)
        for (cnt2 = l_min_M1[ij + 1][cnt1];
             cnt2 <= l_max_M1[ij + 1][cnt1];
             cnt2 += 2)
          if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
            if (e_m1 == (E_M1[ij + 1][cnt1][cnt2 / 2] + P->MLbase)) {
              backtrack_m1(i, j - 1, cnt1, cnt2, structure, vc);
              return;
            }
          }
    }
  } else {
    /* M1(i,j) = C(i,j) + MLstem within distance class (k,l) */
    if (E_C[ij]) {
      if ((k >= k_min_C[ij]) && (k <= k_max_C[ij])) {
        if ((l >= l_min_C[ij][k]) && (l <= l_max_C[ij][k])) {
          if (E_C[ij][k][l / 2] + energy == e_m1) {
            backtrack_c(i, j, k, l, structure, vc);
            return;
          }
        }
      }
    }

    /* M1(i,j) = M1(i,j-1) + MLbase within distance class (k-d1, l-d2) */
    if (d1 <= k && d2 <= l) {
      if ((k - d1 >= k_min_M1[ij + 1]) && (k - d1 <= k_max_M1[ij + 1])) {
        if ((l - d2 >= l_min_M1[ij + 1][k - d1]) && (l - d2 <= l_max_M1[ij + 1][k - d1])) {
          if (E_M1[ij + 1][k - d1][(l - d2) / 2] + P->MLbase == e_m1) {
            backtrack_m1(i, j - 1, k - d1, l - d2, structure, vc);
            return;
          }
        }
      }
    }
  }

  vrna_message_error("backtrack failed in m1\n");
}
/*
 * Backtrack the exterior part Fc of a circular sequence in the 2D fold
 * matrices.
 *
 * (k,l) select the distance class to the two reference structures;
 * k == -1 selects the "remainder" class E_Fc_rem of states whose
 * distances exceed maxD1/maxD2.  The circular exterior loop is either
 * the open chain or closed by a hairpin (E_FcH), interior loop (E_FcI),
 * or multiloop (E_FcM); the routine compares the stored sub-totals with
 * E_Fc, re-evaluates the matching decomposition, and recurses into
 * backtrack_c()/backtrack_m()/backtrack_m2().  Aborts via
 * vrna_message_error() if no case reproduces the stored energy.
 */
PRIVATE void
backtrack_fc(int k,
             int l,
             char *structure,
             vrna_fold_compound_t *vc)
{
  unsigned int d, i, j, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int *my_iindx, *jindx, energy, cnt1, cnt2, cnt3, cnt4, *rtype, turn;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *sequence, *ptype;
  int **E_Fc, **E_FcH, **E_FcI, **E_FcM, ***E_C, ***E_M, ***E_M2;
  int *E_C_rem, *E_M_rem, *E_M2_rem, E_Fc_rem, E_FcH_rem, E_FcI_rem, E_FcM_rem;
  int **l_min_C, **l_max_C, *k_min_C, *k_max_C;
  int **l_min_M, **l_max_M, *k_min_M, *k_max_M;
  int **l_min_M2, **l_max_M2, *k_min_M2, *k_max_M2;
  int *l_min_FcH, *l_max_FcH, k_min_FcH, k_max_FcH;
  int *l_min_FcI, *l_max_FcI, k_min_FcI, k_max_FcI;
  int *l_min_FcM, *l_max_FcM, k_min_FcM, k_max_FcM;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  sequence = vc->sequence;
  seq_length = vc->length;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  turn = md->min_loop_size;

  /* distances of the whole sequence [1,n] to both references */
  base_d1 = referenceBPs1[my_iindx[1] - seq_length];
  base_d2 = referenceBPs2[my_iindx[1] - seq_length];

  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M = matrices->E_M;
  l_min_M = matrices->l_min_M;
  l_max_M = matrices->l_max_M;
  k_min_M = matrices->k_min_M;
  k_max_M = matrices->k_max_M;
  E_M2 = matrices->E_M2;
  l_min_M2 = matrices->l_min_M2;
  l_max_M2 = matrices->l_max_M2;
  k_min_M2 = matrices->k_min_M2;
  k_max_M2 = matrices->k_max_M2;
  E_Fc = matrices->E_Fc;
  E_FcI = matrices->E_FcI;
  l_min_FcI = matrices->l_min_FcI;
  l_max_FcI = matrices->l_max_FcI;
  k_min_FcI = matrices->k_min_FcI;
  k_max_FcI = matrices->k_max_FcI;
  E_FcH = matrices->E_FcH;
  l_min_FcH = matrices->l_min_FcH;
  l_max_FcH = matrices->l_max_FcH;
  k_min_FcH = matrices->k_min_FcH;
  k_max_FcH = matrices->k_max_FcH;
  E_FcM = matrices->E_FcM;
  l_min_FcM = matrices->l_min_FcM;
  l_max_FcM = matrices->l_max_FcM;
  k_min_FcM = matrices->k_min_FcM;
  k_max_FcM = matrices->k_max_FcM;
  E_C_rem = matrices->E_C_rem;
  E_M_rem = matrices->E_M_rem;
  E_M2_rem = matrices->E_M2_rem;
  E_Fc_rem = matrices->E_Fc_rem;
  E_FcH_rem = matrices->E_FcH_rem;
  E_FcI_rem = matrices->E_FcI_rem;
  E_FcM_rem = matrices->E_FcM_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  if (k == -1) {
    /* check if mfe might be open chain */
    if (E_Fc_rem == 0)
      if ((referenceBPs1[my_iindx[1] - seq_length] > maxD1) || (referenceBPs2[my_iindx[1] - seq_length] > maxD2))
        return;

    /* check for hairpin configurations */
    if (E_Fc_rem == E_FcH_rem) {
      for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij;
          int type, no_close;
          /* NOTE(review): loopseq is only filled when u < 7; presumably
           * E_Hairpin() ignores the sequence for larger loops -- confirm,
           * otherwise an indeterminate buffer is passed below */
          char loopseq[10];
          i = j - d + 1;
          ij = my_iindx[i] - j;
          /* u = length of the exterior hairpin loop (wrapping around n/1) */
          u = seq_length - j + i - 1;
          if (u < turn)
            continue;

          type = ptype[jindx[j] + i];

          no_close = (((type == 3) || (type == 4)) && no_closingGU);

          /* pair (i,j) is seen from the outside, hence the reversed type */
          type = rtype[type];

          if (!type)
            continue;

          if (no_close)
            continue;

          d1 = base_d1 - referenceBPs1[ij];
          d2 = base_d2 - referenceBPs2[ij];
          if (u < 7) {
            /* build the wrapped loop sequence j..n,1..i */
            strcpy(loopseq, sequence + j - 1);
            strncat(loopseq, sequence, i);
          }

          energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);

          if (E_C_rem[ij] != INF) {
            if (E_Fc_rem == (E_C_rem[ij] + energy)) {
              backtrack_c(i, j, -1, -1, structure, vc);
              return;
            }
          }

          if (E_C[ij]) {
            for (cnt1 = k_min_C[ij];
                 cnt1 <= k_max_C[ij];
                 cnt1++)
              for (cnt2 = l_min_C[ij][cnt1];
                   cnt2 <= l_max_C[ij][cnt1];
                   cnt2 += 2)
                if (((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)) {
                  if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + energy)) {
                    backtrack_c(i, j, cnt1, cnt2, structure, vc);
                    return;
                  }
                }
          }
        }
    }

    /* check for interior loop configurations */
    if (E_Fc_rem == E_FcI_rem) {
      for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij, p, q, pq;
          int type, type_2;
          i = j - d + 1;
          ij = my_iindx[i] - j;
          u = seq_length - j + i - 1;
          if (u < turn)
            continue;

          type = rtype[(unsigned int)ptype[jindx[j] + i]];
          if (!type)
            continue;

          /* the second pair (p,q) lies in the exterior segment j+1..n */
          for (p = j + 1; p < seq_length; p++) {
            unsigned int u1, qmin, ln_pre;
            u1 = p - j - 1;
            if (u1 + i - 1 > MAXLOOP)
              break;

            qmin = p + turn + 1;
            ln_pre = u1 + i + seq_length;
            if (ln_pre > qmin + MAXLOOP)
              qmin = ln_pre - MAXLOOP - 1;

            for (q = qmin; q <= seq_length; q++) {
              unsigned int u2;
              pq = my_iindx[p] - q;
              type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
              if (type_2 == 0)
                continue;

              u2 = i - 1 + seq_length - q;
              if (u1 + u2 > MAXLOOP)
                continue;

              energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);

              /* remainder C(i,j) with regular or remainder C(p,q) */
              if (E_C_rem[ij] != INF) {
                if (E_C[pq]) {
                  for (cnt1 = k_min_C[pq];
                       cnt1 <= k_max_C[pq];
                       cnt1++)
                    for (cnt2 = l_min_C[pq][cnt1];
                         cnt2 <= l_max_C[pq][cnt1];
                         cnt2 += 2)
                      if (E_Fc_rem == (E_C_rem[ij] + E_C[pq][cnt1][cnt2 / 2] + energy)) {
                        backtrack_c(i, j, -1, -1, structure, vc);
                        backtrack_c(p, q, cnt1, cnt2, structure, vc);
                        return;
                      }
                }

                if (E_C_rem[pq] != INF) {
                  if (E_Fc_rem == (E_C_rem[ij] + E_C_rem[pq] + energy)) {
                    backtrack_c(i, j, -1, -1, structure, vc);
                    backtrack_c(p, q, -1, -1, structure, vc);
                    return;
                  }
                }
              }

              /* regular C(i,j) with remainder C(p,q) */
              if (E_C_rem[pq] != INF) {
                if (E_C[ij]) {
                  for (cnt1 = k_min_C[ij];
                       cnt1 <= k_max_C[ij];
                       cnt1++)
                    for (cnt2 = l_min_C[ij][cnt1];
                         cnt2 <= l_max_C[ij][cnt1];
                         cnt2 += 2)
                      if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + E_C_rem[pq] + energy)) {
                        backtrack_c(i, j, cnt1, cnt2, structure, vc);
                        backtrack_c(p, q, -1, -1, structure, vc);
                        return;
                      }
                }
              }

              if (!(E_C[ij]))
                continue;

              if (!(E_C[pq]))
                continue;

              /* get distance to reference if closing the interior loop
               * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
               * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
               */
              d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
              d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];

              /* both parts regular, combined distance leaves the window */
              for (cnt1 = k_min_C[ij];
                   cnt1 <= k_max_C[ij];
                   cnt1++)
                for (cnt2 = l_min_C[ij][cnt1];
                     cnt2 <= l_max_C[ij][cnt1];
                     cnt2 += 2)
                  for (cnt3 = k_min_C[pq];
                       cnt3 <= k_max_C[pq];
                       cnt3++)
                    for (cnt4 = l_min_C[pq][cnt3];
                         cnt4 <= l_max_C[pq][cnt3];
                         cnt4 += 2)
                      if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                        if (E_Fc_rem == (E_C[ij][cnt1][cnt2 / 2] + E_C[pq][cnt3][cnt4 / 2] + energy)) {
                          backtrack_c(i, j, cnt1, cnt2, structure, vc);
                          backtrack_c(p, q, cnt3, cnt4, structure, vc);
                          return;
                        }
                      }
            } /* end for q */
          } /* end for p */
        }
    }

    /* check for multi loop configurations */
    if (E_Fc_rem == E_FcM_rem) {
      if (seq_length > 2 * turn) {
        for (i = turn + 1; i < seq_length - 2 * turn; i++) {
          /* get distancies to references
           * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
           * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
           */
          /* remainder M(1,i) with regular or remainder M2(i+1) */
          if (E_M_rem[my_iindx[1] - i] != INF) {
            if (E_M2[i + 1]) {
              for (cnt1 = k_min_M2[i + 1];
                   cnt1 <= k_max_M2[i + 1];
                   cnt1++)
                for (cnt2 = l_min_M2[i + 1][cnt1];
                     cnt2 <= l_max_M2[i + 1][cnt1];
                     cnt2 += 2)
                  if (E_Fc_rem == (E_M_rem[my_iindx[1] - i] + E_M2[i + 1][cnt1][cnt2 / 2] + P->MLclosing)) {
                    backtrack_m(1, i, -1, -1, structure, vc);
                    backtrack_m2(i + 1, cnt1, cnt2, structure, vc);
                    return;
                  }
            }

            if (E_M2_rem[i + 1] != INF) {
              if (E_Fc_rem == (E_M_rem[my_iindx[1] - i] + E_M2_rem[i + 1] + P->MLclosing)) {
                backtrack_m(1, i, -1, -1, structure, vc);
                backtrack_m2(i + 1, -1, -1, structure, vc);
                return;
              }
            }
          }

          /* regular M(1,i) with remainder M2(i+1) */
          if (E_M2_rem[i + 1] != INF) {
            if (E_M[my_iindx[1] - i]) {
              for (cnt1 = k_min_M[my_iindx[1] - i];
                   cnt1 <= k_max_M[my_iindx[1] - i];
                   cnt1++)
                for (cnt2 = l_min_M[my_iindx[1] - i][cnt1];
                     cnt2 <= l_max_M[my_iindx[1] - i][cnt1];
                     cnt2 += 2)
                  if (E_Fc_rem == (E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2_rem[i + 1] + P->MLclosing)) {
                    backtrack_m(1, i, cnt1, cnt2, structure, vc);
                    backtrack_m2(i + 1, -1, -1, structure, vc);
                    return;
                  }
            }
          }

          if (!(E_M[my_iindx[1] - i]))
            continue;

          if (!(E_M2[i + 1]))
            continue;

          d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
          d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];

          /* both parts regular, combined distance leaves the window */
          for (cnt1 = k_min_M[my_iindx[1] - i];
               cnt1 <= k_max_M[my_iindx[1] - i];
               cnt1++)
            for (cnt2 = l_min_M[my_iindx[1] - i][cnt1];
                 cnt2 <= l_max_M[my_iindx[1] - i][cnt1];
                 cnt2 += 2)
              for (cnt3 = k_min_M2[i + 1];
                   cnt3 <= k_max_M2[i + 1];
                   cnt3++)
                for (cnt4 = l_min_M2[i + 1][cnt3];
                     cnt4 <= l_max_M2[i + 1][cnt3];
                     cnt4 += 2)
                  if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                    if (E_Fc_rem == (E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2[i + 1][cnt3][cnt4 / 2] + P->MLclosing)) {
                      backtrack_m(1, i, cnt1, cnt2, structure, vc);
                      backtrack_m2(i + 1, cnt3, cnt4, structure, vc);
                      return;
                    }
                  }
        }
      }
    }
  } else {
    /* open chain ? */
    if (E_Fc[k][l / 2] == 0)
      if ((k == referenceBPs1[my_iindx[1] - seq_length]) && (l == referenceBPs2[my_iindx[1] - seq_length]))
        return;

    /* hairpin loop closing the exterior */
    if ((k >= k_min_FcH) && (k <= k_max_FcH)) {
      if ((l >= l_min_FcH[k]) && (l <= l_max_FcH[k])) {
        if (E_Fc[k][l / 2] == E_FcH[k][l / 2]) {
          for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij;
              int type, no_close;
              /* NOTE(review): loopseq is only filled when u < 7 (see the
               * corresponding note in the k == -1 branch above) */
              char loopseq[10];
              i = j - d + 1;
              ij = my_iindx[i] - j;
              if (!E_C[ij])
                continue;

              u = seq_length - j + i - 1;
              if (u < turn)
                continue;

              type = ptype[jindx[j] + i];

              no_close = (((type == 3) || (type == 4)) && no_closingGU);

              type = rtype[type];

              if (!type)
                continue;

              if (no_close)
                continue;

              d1 = base_d1 - referenceBPs1[ij];
              d2 = base_d2 - referenceBPs2[ij];
              if (u < 7) {
                strcpy(loopseq, sequence + j - 1);
                strncat(loopseq, sequence, i);
              }

              energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);

              if ((k >= d1) && (l >= d2)) {
                if ((k - d1 >= k_min_C[ij]) && (k - d1 <= k_max_C[ij])) {
                  if ((l - d2 >= l_min_C[ij][k - d1]) && (l - d2 <= l_max_C[ij][k - d1])) {
                    if (E_Fc[k][l / 2] == E_C[ij][k - d1][(l - d2) / 2] + energy) {
                      backtrack_c(i, j, k - d1, l - d2, structure, vc);
                      return;
                    }
                  }
                }
              }
            }
        }
      }
    }

    /* interior loop closing the exterior */
    if ((k >= k_min_FcI) && (k <= k_max_FcI)) {
      if ((l >= l_min_FcI[k]) && (l <= l_max_FcI[k])) {
        if (E_Fc[k][l / 2] == E_FcI[k][l / 2]) {
          for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij, p, q, pq;
              int type, type_2;
              i = j - d + 1;
              ij = my_iindx[i] - j;
              if (!E_C[ij])
                continue;

              u = seq_length - j + i - 1;
              if (u < turn)
                continue;

              type = ptype[jindx[j] + i];
              type = rtype[type];

              if (!type)
                continue;

              for (p = j + 1; p < seq_length; p++) {
                unsigned int u1, qmin, ln_pre;
                u1 = p - j - 1;
                if (u1 + i - 1 > MAXLOOP)
                  break;

                qmin = p + turn + 1;
                ln_pre = u1 + i + seq_length;
                if (ln_pre > qmin + MAXLOOP)
                  qmin = ln_pre - MAXLOOP - 1;

                for (q = qmin; q <= seq_length; q++) {
                  unsigned int u2;
                  pq = my_iindx[p] - q;
                  if (!E_C[pq])
                    continue;

                  type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
                  if (type_2 == 0)
                    continue;

                  u2 = i - 1 + seq_length - q;
                  if (u1 + u2 > MAXLOOP)
                    continue;

                  /* get distance to reference if closing the interior loop
                   * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
                   * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
                   */
                  d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                  d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                  energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);

                  /* split the remaining budget (k-d1, l-d2) over both pairs */
                  if ((k >= d1) && (l >= d2)) {
                    for (cnt1 = k_min_C[ij]; cnt1 <= MIN2(k_max_C[ij], k - d1); cnt1++)
                      for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= MIN2(l_max_C[ij][cnt1], l - d2); cnt2 += 2)
                        if ((k - d1 - cnt1 >= k_min_C[pq]) && (k - d1 - cnt1 <= k_max_C[pq])) {
                          if ((l - d2 - cnt2 >= l_min_C[pq][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_C[pq][k - d1 - cnt1])) {
                            if ((E_C[ij][cnt1][cnt2 / 2] + E_C[pq][k - d1 - cnt1][(l - d2 - cnt2) / 2] + energy) == E_Fc[k][l / 2]) {
                              backtrack_c(i, j, cnt1, cnt2, structure, vc);
                              backtrack_c(p, q, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                              return;
                            }
                          }
                        }
                  }
                }
              }
            }
        }
      }
    }

    /* multiloop closing the exterior */
    if ((k >= k_min_FcM) && (k <= k_max_FcM)) {
      if ((l >= l_min_FcM[k]) && (l <= l_max_FcM[k])) {
        if (E_Fc[k][l / 2] == E_FcM[k][l / 2]) {
          if (seq_length > 2 * turn) {
            for (i = turn + 1; i < seq_length - 2 * turn; i++) {
              /* get distancies to references
               * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
               * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
               */
              if (!E_M[my_iindx[1] - i])
                continue;

              if (!E_M2[i + 1])
                continue;

              d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
              d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];
              if ((k >= d1) && (l >= d2)) {
                for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= MIN2(k_max_M[my_iindx[1] - i], k - d1); cnt1++)
                  for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= MIN2(l_max_M[my_iindx[1] - i][cnt1], l - d2); cnt2 += 2)
                    if ((k - d1 - cnt1 >= k_min_M2[i + 1]) && (k - d1 - cnt1 <= k_max_M2[i + 1])) {
                      if ((l - d2 - cnt2 >= l_min_M2[i + 1][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_M2[i + 1][k - d1 - cnt1])) {
                        if ((E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + E_M2[i + 1][k - d1 - cnt1][(l - d2 - cnt2) / 2] + P->MLclosing) == E_FcM[k][l / 2]) {
                          backtrack_m(1, i, cnt1, cnt2, structure, vc);
                          backtrack_m2(i + 1, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                          return;
                        }
                      }
                    }
              }
            }
          }
        }
      }
    }
  }
  vrna_message_error("backtack failed in fc\n");
}
/*
 * Backtrack an entry of the M2 matrix: the segment [i,n] of a circular
 * multiloop decomposed into exactly two M1 components, split at some j.
 *
 * (k,l) address a distance class to the two reference structures;
 * k == -1 selects the "remainder" class E_M2_rem[i] of states whose
 * distances exceed maxD1/maxD2.  For each split point j the routine
 * re-evaluates M2(i) = M1(i,j) + M1(j+1,n) over the stored distance
 * classes and recurses into backtrack_m1() for the matching pair.
 * Aborts via vrna_message_error() if no case reproduces the energy.
 *
 * Fixes vs. previous revision:
 *  - the first remainder scan stepped its l-loop with cnt2++ while every
 *    other distance-class l-loop in this file steps by 2 (l values of one
 *    class share a single parity; stepping by 1 visits invalid odd
 *    offsets and re-reads the same cnt2/2 cell) -- now cnt2 += 2;
 *  - typo in the error message ("backtack").
 */
PRIVATE void
backtrack_m2(unsigned int i,
             int k,
             int l,
             char *structure,
             vrna_fold_compound_t *vc)
{
  unsigned int j, ij, j3, n;
  unsigned int *referenceBPs1, *referenceBPs2;
  unsigned int d1, d2, base_d1, base_d2, maxD1, maxD2;
  int *my_iindx, cnt1, cnt2, cnt3, cnt4, turn;
  int ***E_M1, ***E_M2, *E_M2_rem, *E_M1_rem, e;
  int **l_min_M1, **l_max_M1, *k_min_M1, *k_max_M1;
  vrna_mx_mfe_t *matrices;

  matrices = vc->matrices;
  n = vc->length;
  my_iindx = vc->iindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  turn = vc->params->model_details.min_loop_size;
  E_M1 = matrices->E_M1;
  l_min_M1 = matrices->l_min_M1;
  l_max_M1 = matrices->l_max_M1;
  k_min_M1 = matrices->k_min_M1;
  k_max_M1 = matrices->k_max_M1;
  E_M1_rem = matrices->E_M1_rem;
  E_M2 = matrices->E_M2;
  E_M2_rem = matrices->E_M2_rem;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;

  /* distances of segment [i,n] to both references */
  base_d1 = referenceBPs1[my_iindx[i] - n];
  base_d2 = referenceBPs2[my_iindx[i] - n];

  if (k == -1) {
    /* the energy value one of the cases below must reproduce */
    e = E_M2_rem[i];
    for (j = i + turn + 1; j < n - turn - 1; j++) {
      /* remainder M1(i,j) with a regular or remainder M1(j+1,n) */
      if (E_M1_rem[my_iindx[i] - j] != INF) {
        if (E_M1[my_iindx[j + 1] - n]) {
          for (cnt1 = k_min_M1[my_iindx[j + 1] - n];
               cnt1 <= k_max_M1[my_iindx[j + 1] - n];
               cnt1++)
            for (cnt2 = l_min_M1[my_iindx[j + 1] - n][cnt1];
                 cnt2 <= l_max_M1[my_iindx[j + 1] - n][cnt1];
                 cnt2 += 2)
              if (e == E_M1_rem[my_iindx[i] - j] + E_M1[my_iindx[j + 1] - n][cnt1][cnt2 / 2]) {
                backtrack_m1(i, j, -1, -1, structure, vc);
                backtrack_m1(j + 1, n, cnt1, cnt2, structure, vc);
                return;
              }
        }

        if (E_M1_rem[my_iindx[j + 1] - n] != INF) {
          if (e == E_M1_rem[my_iindx[i] - j] + E_M1_rem[my_iindx[j + 1] - n]) {
            backtrack_m1(i, j, -1, -1, structure, vc);
            backtrack_m1(j + 1, n, -1, -1, structure, vc);
            return;
          }
        }
      }

      /* regular M1(i,j) with a remainder M1(j+1,n) */
      if (E_M1_rem[my_iindx[j + 1] - n] != INF) {
        if (E_M1[my_iindx[i] - j]) {
          for (cnt1 = k_min_M1[my_iindx[i] - j];
               cnt1 <= k_max_M1[my_iindx[i] - j];
               cnt1++)
            for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1];
                 cnt2 <= l_max_M1[my_iindx[i] - j][cnt1];
                 cnt2 += 2)
              if (e == E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1_rem[my_iindx[j + 1] - n]) {
                backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                backtrack_m1(j + 1, n, -1, -1, structure, vc);
                return;
              }
        }
      }

      if (!E_M1[my_iindx[i] - j])
        continue;

      if (!E_M1[my_iindx[j + 1] - n])
        continue;

      /* both parts regular: their combined distance must leave the window */
      d1 = referenceBPs1[my_iindx[i] - n] - referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[j + 1] - n];
      d2 = referenceBPs2[my_iindx[i] - n] - referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[j + 1] - n];
      for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++)
        for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) {
          for (cnt3 = k_min_M1[my_iindx[j + 1] - n]; cnt3 <= k_max_M1[my_iindx[j + 1] - n]; cnt3++)
            for (cnt4 = l_min_M1[my_iindx[j + 1] - n][cnt3]; cnt4 <= l_max_M1[my_iindx[j + 1] - n][cnt3]; cnt4 += 2) {
              if (((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)) {
                if (e == E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - n][cnt3][cnt4 / 2]) {
                  backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                  backtrack_m1(j + 1, n, cnt3, cnt4, structure, vc);
                  return;
                }
              }
            }
        }
    }
  } else {
    for (j = i + turn + 1; j < n - turn - 1; j++) {
      if (!E_M1[my_iindx[i] - j])
        continue;

      if (!E_M1[my_iindx[j + 1] - n])
        continue;

      ij = my_iindx[i] - j;
      j3 = my_iindx[j + 1] - n;
      d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[j3];
      d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[j3];

      /* split the remaining budget (k-d1, l-d2) over both M1 parts */
      for (cnt1 = k_min_M1[ij]; cnt1 <= MIN2(k_max_M1[ij], k - d1); cnt1++)
        for (cnt2 = l_min_M1[ij][cnt1]; cnt2 <= MIN2(l_max_M1[ij][cnt1], l - d2); cnt2 += 2)
          if ((k - d1 - cnt1 >= k_min_M1[j3]) && (k - d1 - cnt1 <= k_max_M1[j3])) {
            if ((l - d2 - cnt2 >= l_min_M1[j3][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_M1[j3][k - d1 - cnt1])) {
              if (E_M1[ij][cnt1][cnt2 / 2] + E_M1[j3][k - d1 - cnt1][(l - d2 - cnt2) / 2] == E_M2[i][k][l / 2]) {
                backtrack_m1(i, j, cnt1, cnt2, structure, vc);
                backtrack_m1(j + 1, n, k - d1 - cnt1, l - d2 - cnt2, structure, vc);
                return;
              }
            }
          }
    }
  }
  vrna_message_error("backtrack failed in m2\n");
}
PRIVATE void
mfe_circ(vrna_fold_compound_t *vc)
{
  /*
   * Fill the circular-folding MFE matrices for 2D (distance-class) folding.
   *
   * For every pair of base-pair distances (k, l) to the two reference
   * structures, this computes the minimum free energy of a circular fold:
   *   E_M2  - at least two multiloop components in [i..n] (helper matrix)
   *   E_FcH - circular structure closed by a hairpin loop
   *   E_FcI - circular structure closed by an interior loop
   *   E_FcM - circular structure closed by a multiloop
   *   E_Fc  - overall minimum of the three cases (plus the open chain)
   * Distance classes exceeding (maxD1, maxD2) are collapsed into the
   * corresponding *_rem ("remainder") scalars.
   *
   * All (k, l) matrices are stored with offset pointers (row k is indexed
   * directly, column l is stored at index l/2 since l has fixed parity per k).
   */
  unsigned int d, i, j, maxD1, maxD2, seq_length, *referenceBPs1, *referenceBPs2, d1, d2, base_d1, base_d2, *mm1, *mm2, *bpdist;
  int *my_iindx, *jindx, energy, cnt1, cnt2, cnt3, cnt4, *rtype, turn;
  short *S1;
  char *sequence, *ptype;
  int ***E_C, ***E_M, ***E_M1;
  int *E_C_rem, *E_M_rem, *E_M1_rem;
  int **l_min_C, **l_max_C, **l_min_M, **l_max_M, **l_min_M1, **l_max_M1;
  int *k_min_C, *k_max_C, *k_min_M, *k_max_M, *k_min_M1, *k_max_M1;
  vrna_param_t *P;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;

  /* unpack the fold compound into locals for readability and speed */
  P = vc->params;
  md = &(P->model_details);
  matrices = vc->matrices;
  sequence = vc->sequence;
  seq_length = vc->length;
  maxD1 = vc->maxD1;
  maxD2 = vc->maxD2;
  S1 = vc->sequence_encoding;
  ptype = vc->ptype;
  rtype = &(md->rtype[0]);
  my_iindx = vc->iindx;
  jindx = vc->jindx;
  referenceBPs1 = vc->referenceBPs1;
  referenceBPs2 = vc->referenceBPs2;
  mm1 = vc->mm1;
  mm2 = vc->mm2;
  bpdist = vc->bpdist;
  turn = md->min_loop_size;
  E_C = matrices->E_C;
  l_min_C = matrices->l_min_C;
  l_max_C = matrices->l_max_C;
  k_min_C = matrices->k_min_C;
  k_max_C = matrices->k_max_C;
  E_M = matrices->E_M;
  l_min_M = matrices->l_min_M;
  l_max_M = matrices->l_max_M;
  k_min_M = matrices->k_min_M;
  k_max_M = matrices->k_max_M;
  E_M1 = matrices->E_M1;
  l_min_M1 = matrices->l_min_M1;
  l_max_M1 = matrices->l_max_M1;
  k_min_M1 = matrices->k_min_M1;
  k_max_M1 = matrices->k_max_M1;
  E_C_rem = matrices->E_C_rem;
  E_M_rem = matrices->E_M_rem;
  E_M1_rem = matrices->E_M1_rem;

  /* Each iteration i writes only E_M2[i] and friends, so rows are independent
   * and the loop parallelizes over i */
#ifdef _OPENMP
#pragma omp parallel for private(d1,d2,cnt1,cnt2,cnt3,cnt4,j, i)
#endif
  for (i = 1; i < seq_length - turn - 1; i++) {
    /* guess memory requirements for M2 */
    int min_k, max_k, max_l, min_l;
    int min_k_real, max_k_real, *min_l_real, *max_l_real;
    min_k = min_l = 0;
    max_k = mm1[my_iindx[i] - seq_length] + referenceBPs1[my_iindx[i] - seq_length];
    max_l = mm2[my_iindx[i] - seq_length] + referenceBPs2[my_iindx[i] - seq_length];
    prepareBoundaries(min_k,
                      max_k,
                      min_l,
                      max_l,
                      bpdist[my_iindx[i] - seq_length],
                      &matrices->k_min_M2[i],
                      &matrices->k_max_M2[i],
                      &matrices->l_min_M2[i],
                      &matrices->l_max_M2[i]
                      );
    prepareArray(&matrices->E_M2[i],
                 matrices->k_min_M2[i],
                 matrices->k_max_M2[i],
                 matrices->l_min_M2[i],
                 matrices->l_max_M2[i]
                 );
    preparePosteriorBoundaries(matrices->k_max_M2[i] - matrices->k_min_M2[i] + 1,
                               matrices->k_min_M2[i],
                               &min_k_real,
                               &max_k_real,
                               &min_l_real,
                               &max_l_real
                               );
    /* begin filling of M2 array: M2[i] = min over j of M1[i..j] + M1[j+1..n] */
    for (j = i + turn + 1; j < seq_length - turn - 1; j++) {
      /* case 1: left part is in the remainder class */
      if (E_M1_rem[my_iindx[i] - j] != INF) {
        if (E_M1[my_iindx[j + 1] - seq_length]) {
          /* NOTE(review): this loop steps cnt2 by 1 while all sibling loops
           * step by 2; odd/even l values of fixed parity make the extra
           * iterations redundant reads — confirm intended */
          for (cnt1 = k_min_M1[my_iindx[j + 1] - seq_length];
               cnt1 <= k_max_M1[my_iindx[j + 1] - seq_length];
               cnt1++)
            for (cnt2 = l_min_M1[my_iindx[j + 1] - seq_length][cnt1];
                 cnt2 <= l_max_M1[my_iindx[j + 1] - seq_length][cnt1];
                 cnt2++)
              matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i],
                                           E_M1_rem[my_iindx[i] - j] + E_M1[my_iindx[j + 1] - seq_length][cnt1][cnt2 / 2]
                                           );
        }
        if (E_M1_rem[my_iindx[j + 1] - seq_length] != INF)
          matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i], E_M1_rem[my_iindx[i] - j] + E_M1_rem[my_iindx[j + 1] - seq_length]);
      }
      /* case 2: right part is in the remainder class */
      if (E_M1_rem[my_iindx[j + 1] - seq_length] != INF) {
        if (E_M1[my_iindx[i] - j]) {
          for (cnt1 = k_min_M1[my_iindx[i] - j];
               cnt1 <= k_max_M1[my_iindx[i] - j];
               cnt1++)
            for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1];
                 cnt2 <= l_max_M1[my_iindx[i] - j][cnt1];
                 cnt2 += 2)
              matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i],
                                           E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1_rem[my_iindx[j + 1] - seq_length]
                                           );
        }
      }
      /* case 3: both parts have tracked distance classes */
      if (!E_M1[my_iindx[i] - j])
        continue;
      if (!E_M1[my_iindx[j + 1] - seq_length])
        continue;
      /* base-pair distance contribution of splitting [i..n] at j */
      d1 = referenceBPs1[my_iindx[i] - seq_length] - referenceBPs1[my_iindx[i] - j] - referenceBPs1[my_iindx[j + 1] - seq_length];
      d2 = referenceBPs2[my_iindx[i] - seq_length] - referenceBPs2[my_iindx[i] - j] - referenceBPs2[my_iindx[j + 1] - seq_length];
      for (cnt1 = k_min_M1[my_iindx[i] - j]; cnt1 <= k_max_M1[my_iindx[i] - j]; cnt1++)
        for (cnt2 = l_min_M1[my_iindx[i] - j][cnt1]; cnt2 <= l_max_M1[my_iindx[i] - j][cnt1]; cnt2 += 2) {
          for (cnt3 = k_min_M1[my_iindx[j + 1] - seq_length]; cnt3 <= k_max_M1[my_iindx[j + 1] - seq_length]; cnt3++)
            for (cnt4 = l_min_M1[my_iindx[j + 1] - seq_length][cnt3]; cnt4 <= l_max_M1[my_iindx[j + 1] - seq_length][cnt3]; cnt4 += 2) {
              if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) {
                matrices->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(matrices->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2],
                                                                                  E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - seq_length][cnt3][cnt4 / 2]
                                                                                  );
                updatePosteriorBoundaries(cnt1 + cnt3 + d1,
                                          cnt2 + cnt4 + d2,
                                          &min_k_real,
                                          &max_k_real,
                                          &min_l_real,
                                          &max_l_real
                                          );
              } else {
                /* distance class out of range -> collapse into remainder */
                matrices->E_M2_rem[i] = MIN2(matrices->E_M2_rem[i],
                                             E_M1[my_iindx[i] - j][cnt1][cnt2 / 2] + E_M1[my_iindx[j + 1] - seq_length][cnt3][cnt4 / 2]
                                             );
              }
            }
        }
    }
    /* resize and move memory portions of energy matrix E_M2 */
    adjustArrayBoundaries(&matrices->E_M2[i],
                          &matrices->k_min_M2[i],
                          &matrices->k_max_M2[i],
                          &matrices->l_min_M2[i],
                          &matrices->l_max_M2[i],
                          min_k_real,
                          max_k_real,
                          min_l_real,
                          max_l_real
                          );
  } /* end for i */

  /* distances of the full sequence to the references */
  base_d1 = referenceBPs1[my_iindx[1] - seq_length];
  base_d2 = referenceBPs2[my_iindx[1] - seq_length];
  /* guess memory requirements for E_FcH, E_FcI and E_FcM */
  int min_k, max_k, max_l, min_l;
  int min_k_real, max_k_real, min_k_real_fcH, max_k_real_fcH, min_k_real_fcI, max_k_real_fcI, min_k_real_fcM, max_k_real_fcM;
  int *min_l_real, *max_l_real, *min_l_real_fcH, *max_l_real_fcH, *min_l_real_fcI, *max_l_real_fcI, *min_l_real_fcM, *max_l_real_fcM;
  max_l_real_fcM = min_l_real_fcM = NULL;
  max_l_real_fcI = min_l_real_fcI = NULL;
  max_l_real_fcH = min_l_real_fcH = NULL;
  max_l_real = min_l_real = NULL;
  min_k = min_l = 0;
  max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length];
  max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length];
  /* the eight allocations below are independent, hence parallel sections */
#ifdef _OPENMP
#pragma omp sections
  {
#pragma omp section
    {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &matrices->k_min_Fc,
                    &matrices->k_max_Fc,
                    &matrices->l_min_Fc,
                    &matrices->l_max_Fc
                    );
  prepareArray(&matrices->E_Fc,
               matrices->k_min_Fc,
               matrices->k_max_Fc,
               matrices->l_min_Fc,
               matrices->l_max_Fc
               );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &matrices->k_min_FcH,
                    &matrices->k_max_FcH,
                    &matrices->l_min_FcH,
                    &matrices->l_max_FcH
                    );
  prepareArray(&matrices->E_FcH,
               matrices->k_min_FcH,
               matrices->k_max_FcH,
               matrices->l_min_FcH,
               matrices->l_max_FcH
               );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &matrices->k_min_FcI,
                    &matrices->k_max_FcI,
                    &matrices->l_min_FcI,
                    &matrices->l_max_FcI
                    );
  prepareArray(&matrices->E_FcI,
               matrices->k_min_FcI,
               matrices->k_max_FcI,
               matrices->l_min_FcI,
               matrices->l_max_FcI
               );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  prepareBoundaries(min_k,
                    max_k,
                    min_l,
                    max_l,
                    bpdist[my_iindx[1] - seq_length],
                    &matrices->k_min_FcM,
                    &matrices->k_max_FcM,
                    &matrices->l_min_FcM,
                    &matrices->l_max_FcM
                    );
  prepareArray(&matrices->E_FcM,
               matrices->k_min_FcM,
               matrices->k_max_FcM,
               matrices->l_min_FcM,
               matrices->l_max_FcM
               );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  preparePosteriorBoundaries(max_k - min_k + 1,
                             min_k,
                             &min_k_real,
                             &max_k_real,
                             &min_l_real,
                             &max_l_real
                             );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  preparePosteriorBoundaries(max_k - min_k + 1,
                             min_k,
                             &min_k_real_fcH,
                             &max_k_real_fcH,
                             &min_l_real_fcH,
                             &max_l_real_fcH
                             );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  preparePosteriorBoundaries(max_k - min_k + 1,
                             min_k,
                             &min_k_real_fcI,
                             &max_k_real_fcI,
                             &min_l_real_fcI,
                             &max_l_real_fcI
                             );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  preparePosteriorBoundaries(max_k - min_k + 1,
                             min_k,
                             &min_k_real_fcM,
                             &max_k_real_fcM,
                             &min_l_real_fcM,
                             &max_l_real_fcM
                             );
#ifdef _OPENMP
    }
  }
#endif
  /* begin actual energy calculations; the hairpin (FcH), interior (FcI) and
   * multiloop (FcM) cases touch disjoint matrices, so they run as sections */
#ifdef _OPENMP
#pragma omp sections private(d, d1,d2,cnt1,cnt2,cnt3,cnt4,j, i, energy)
  {
#pragma omp section
    {
#endif
  /* ---- case 1: circular structure closed by a hairpin loop (E_FcH) ---- */
  for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
    for (j = d; j <= seq_length; j++) {
      unsigned int u, ij;
      int type, no_close;
      char loopseq[10];
      i = j - d + 1;
      ij = my_iindx[i] - j;
      u = seq_length - j + i - 1;  /* length of the exterior hairpin loop */
      if (u < turn)
        continue;
      type = ptype[jindx[j] + i];
      no_close = (((type == 3) || (type == 4)) && no_closingGU);
      type = rtype[type];  /* exterior loop sees the pair from outside */
      if (!type)
        continue;
      if (no_close)
        continue;
      d1 = base_d1 - referenceBPs1[ij];
      d2 = base_d2 - referenceBPs2[ij];
      /* NOTE(review): loopseq is only assembled for u < 7; assumes
       * E_Hairpin ignores the sequence for longer loops — confirm */
      if (u < 7) {
        strcpy(loopseq, sequence + j - 1);
        strncat(loopseq, sequence, i);
      }
      energy = E_Hairpin(u, type, S1[j + 1], S1[i - 1], loopseq, P);
      if (E_C_rem[ij] != INF)
        matrices->E_FcH_rem = MIN2(matrices->E_FcH_rem, E_C_rem[ij] + energy);
      if (!E_C[ij])
        continue;
      for (cnt1 = k_min_C[ij]; cnt1 <= k_max_C[ij]; cnt1++)
        for (cnt2 = l_min_C[ij][cnt1]; cnt2 <= l_max_C[ij][cnt1]; cnt2 += 2) {
          if (((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)) {
            matrices->E_FcH[cnt1 + d1][(cnt2 + d2) / 2] = MIN2(matrices->E_FcH[cnt1 + d1][(cnt2 + d2) / 2],
                                                               energy + E_C[ij][cnt1][cnt2 / 2]
                                                               );
            updatePosteriorBoundaries(cnt1 + d1,
                                      cnt2 + d2,
                                      &min_k_real_fcH,
                                      &max_k_real_fcH,
                                      &min_l_real_fcH,
                                      &max_l_real_fcH
                                      );
          } else {
            matrices->E_FcH_rem = MIN2(matrices->E_FcH_rem, energy + E_C[ij][cnt1][cnt2 / 2]);
          }
        }
    }
  /* end of i-j loop */
  /* resize and move memory portions of energy matrix E_FcH */
  adjustArrayBoundaries(&matrices->E_FcH,
                        &matrices->k_min_FcH,
                        &matrices->k_max_FcH,
                        &matrices->l_min_FcH,
                        &matrices->l_max_FcH,
                        min_k_real_fcH,
                        max_k_real_fcH,
                        min_l_real_fcH,
                        max_l_real_fcH
                        );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  /* ---- case 2: circular structure closed by an interior loop (E_FcI) ---- */
  for (d = turn + 2; d <= seq_length; d++) /* i,j in [1..length] */
    for (j = d; j <= seq_length; j++) {
      unsigned int u, ij, p, q, pq;
      int type, type_2, no_close;
      i = j - d + 1;
      ij = my_iindx[i] - j;
      u = seq_length - j + i - 1;
      if (u < turn)
        continue;
      type = ptype[jindx[j] + i];
      no_close = (((type == 3) || (type == 4)) && no_closingGU);
      type = rtype[type];
      if (!type)
        continue;
      if (no_close)
        continue;
      /* pair (i,j) in the remainder class combined with every inner pair (p,q) */
      if (E_C_rem[ij] != INF) {
        for (p = j + 1; p < seq_length; p++) {
          unsigned int u1, qmin, ln_pre;
          u1 = p - j - 1;  /* unpaired bases between j and p */
          if (u1 + i - 1 > MAXLOOP)
            break;
          qmin = p + turn + 1;
          ln_pre = u1 + i + seq_length;
          if (ln_pre > qmin + MAXLOOP)
            qmin = ln_pre - MAXLOOP - 1;
          for (q = qmin; q <= seq_length; q++) {
            unsigned int u2;
            pq = my_iindx[p] - q;
            type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
            if (type_2 == 0)
              continue;
            u2 = i - 1 + seq_length - q;  /* unpaired bases wrapping past n */
            if (u1 + u2 > MAXLOOP)
              continue;
            /* get distance to reference if closing the interior loop
             * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
             * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
             */
            d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
            d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
            energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);
            if (E_C_rem[pq] != INF)
              matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C_rem[ij] + E_C_rem[pq] + energy);
            if (E_C[pq]) {
              for (cnt1 = k_min_C[pq];
                   cnt1 <= k_max_C[pq];
                   cnt1++)
                for (cnt2 = l_min_C[pq][cnt1];
                     cnt2 <= l_max_C[pq][cnt1];
                     cnt2 += 2)
                  matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C_rem[ij] + E_C[pq][cnt1][cnt2 / 2] + energy);
            }
          }
        }
      }
      /* pair (i,j) with tracked distance classes */
      if (E_C[ij]) {
        for (p = j + 1; p < seq_length; p++) {
          unsigned int u1, qmin, ln_pre;
          u1 = p - j - 1;
          if (u1 + i - 1 > MAXLOOP)
            break;
          qmin = p + turn + 1;
          ln_pre = u1 + i + seq_length;
          if (ln_pre > qmin + MAXLOOP)
            qmin = ln_pre - MAXLOOP - 1;
          for (q = qmin; q <= seq_length; q++) {
            unsigned int u2;
            pq = my_iindx[p] - q;
            type_2 = rtype[(unsigned int)ptype[jindx[q] + p]];
            if (type_2 == 0)
              continue;
            u2 = i - 1 + seq_length - q;
            if (u1 + u2 > MAXLOOP)
              continue;
            /* get distance to reference if closing the interior loop
             * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
             * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
             */
            d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
            d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
            energy = E_IntLoop(u1, u2, type, type_2, S1[j + 1], S1[i - 1], S1[p - 1], S1[q + 1], P);
            if (E_C_rem[pq] != INF) {
              for (cnt1 = k_min_C[ij];
                   cnt1 <= k_max_C[ij];
                   cnt1++)
                for (cnt2 = l_min_C[ij][cnt1];
                     cnt2 <= l_max_C[ij][cnt1];
                     cnt2 += 2)
                  matrices->E_FcI_rem = MIN2(matrices->E_FcI_rem, E_C[ij][cnt1][cnt2 / 2] + E_C_rem[pq] + energy);
            }
            if (E_C[pq]) {
              for (cnt1 = k_min_C[ij];
                   cnt1 <= k_max_C[ij];
                   cnt1++)
                for (cnt2 = l_min_C[ij][cnt1];
                     cnt2 <= l_max_C[ij][cnt1];
                     cnt2 += 2)
                  for (cnt3 = k_min_C[pq];
                       cnt3 <= k_max_C[pq];
                       cnt3++)
                    for (cnt4 = l_min_C[pq][cnt3];
                         cnt4 <= l_max_C[pq][cnt3];
                         cnt4 += 2) {
                      if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) {
                        matrices->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(
                          matrices->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2],
                          E_C[ij][cnt1][cnt2 / 2]
                          + E_C[pq][cnt3][cnt4 / 2]
                          + energy
                          );
                        updatePosteriorBoundaries(cnt1 + cnt3 + d1,
                                                  cnt2 + cnt4 + d2,
                                                  &min_k_real_fcI,
                                                  &max_k_real_fcI,
                                                  &min_l_real_fcI,
                                                  &max_l_real_fcI
                                                  );
                      } else {
                        matrices->E_FcI_rem = MIN2(
                          matrices->E_FcI_rem,
                          E_C[ij][cnt1][cnt2 / 2]
                          + E_C[pq][cnt3][cnt4 / 2]
                          + energy
                          );
                      }
                    }
            }
          }
        }
      }
    }
  /* end of i-j loop */
  /* resize and move memory portions of energy matrix E_FcI */
  adjustArrayBoundaries(&matrices->E_FcI,
                        &matrices->k_min_FcI,
                        &matrices->k_max_FcI,
                        &matrices->l_min_FcI,
                        &matrices->l_max_FcI,
                        min_k_real_fcI,
                        max_k_real_fcI,
                        min_l_real_fcI,
                        max_l_real_fcI
                        );
#ifdef _OPENMP
    }
#pragma omp section
    {
#endif
  /* ---- case 3: circular structure closed by a multiloop (E_FcM) ----
   * combine M[1..i] with M2[i+1..n] (>= 2 components) plus the closing
   * penalty; only possible when the sequence can hold three helices */
  if (seq_length > 2 * turn) {
    for (i = turn + 1; i < seq_length - 2 * turn; i++) {
      /* get distancies to references
       * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
       * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
       */
      d1 = base_d1 - referenceBPs1[my_iindx[1] - i] - referenceBPs1[my_iindx[i + 1] - seq_length];
      d2 = base_d2 - referenceBPs2[my_iindx[1] - i] - referenceBPs2[my_iindx[i + 1] - seq_length];
      if (E_M_rem[my_iindx[1] - i] != INF) {
        if (matrices->E_M2[i + 1]) {
          for (cnt1 = matrices->k_min_M2[i + 1];
               cnt1 <= matrices->k_max_M2[i + 1];
               cnt1++)
            for (cnt2 = matrices->l_min_M2[i + 1][cnt1];
                 cnt2 <= matrices->l_max_M2[i + 1][cnt1];
                 cnt2 += 2)
              matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M_rem[my_iindx[1] - i] + matrices->E_M2[i + 1][cnt1][cnt2 / 2] + P->MLclosing);
        }
        if (matrices->E_M2_rem[i + 1] != INF)
          matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M_rem[my_iindx[1] - i] + matrices->E_M2_rem[i + 1] + P->MLclosing);
      }
      if (matrices->E_M2_rem[i + 1] != INF) {
        if (E_M[my_iindx[1] - i]) {
          for (cnt1 = k_min_M[my_iindx[1] - i];
               cnt1 <= k_max_M[my_iindx[1] - i];
               cnt1++)
            for (cnt2 = l_min_M[my_iindx[1] - i][cnt1];
                 cnt2 <= l_max_M[my_iindx[1] - i][cnt1];
                 cnt2 += 2)
              matrices->E_FcM_rem = MIN2(matrices->E_FcM_rem, E_M[my_iindx[1] - i][cnt1][cnt2 / 2] + matrices->E_M2_rem[i + 1] + P->MLclosing);
        }
      }
      if (!E_M[my_iindx[1] - i])
        continue;
      if (!matrices->E_M2[i + 1])
        continue;
      for (cnt1 = k_min_M[my_iindx[1] - i]; cnt1 <= k_max_M[my_iindx[1] - i]; cnt1++)
        for (cnt2 = l_min_M[my_iindx[1] - i][cnt1]; cnt2 <= l_max_M[my_iindx[1] - i][cnt1]; cnt2 += 2)
          for (cnt3 = matrices->k_min_M2[i + 1]; cnt3 <= matrices->k_max_M2[i + 1]; cnt3++)
            for (cnt4 = matrices->l_min_M2[i + 1][cnt3]; cnt4 <= matrices->l_max_M2[i + 1][cnt3]; cnt4 += 2) {
              if (((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)) {
                matrices->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2] = MIN2(
                  matrices->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2) / 2],
                  E_M[my_iindx[1] - i][cnt1][cnt2 / 2]
                  + matrices->E_M2[i + 1][cnt3][cnt4 / 2]
                  + P->MLclosing
                  );
                updatePosteriorBoundaries(cnt1 + cnt3 + d1,
                                          cnt2 + cnt4 + d2,
                                          &min_k_real_fcM,
                                          &max_k_real_fcM,
                                          &min_l_real_fcM,
                                          &max_l_real_fcM
                                          );
              } else {
                matrices->E_FcM_rem = MIN2(
                  matrices->E_FcM_rem,
                  E_M[my_iindx[1] - i][cnt1][cnt2 / 2]
                  + matrices->E_M2[i + 1][cnt3][cnt4 / 2]
                  + P->MLclosing
                  );
              }
            }
    }
  }
  /* resize and move memory portions of energy matrix E_FcM */
  adjustArrayBoundaries(&matrices->E_FcM,
                        &matrices->k_min_FcM,
                        &matrices->k_max_FcM,
                        &matrices->l_min_FcM,
                        &matrices->l_max_FcM,
                        min_k_real_fcM,
                        max_k_real_fcM,
                        min_l_real_fcM,
                        max_l_real_fcM
                        );
#ifdef _OPENMP
    }
  }
#endif
  /* compute E_Fc_rem */
  matrices->E_Fc_rem = MIN2(matrices->E_FcH_rem, matrices->E_FcI_rem);
  matrices->E_Fc_rem = MIN2(matrices->E_Fc_rem, matrices->E_FcM_rem);
  /* add the case were structure is unfolded chain */
  if ((referenceBPs1[my_iindx[1] - seq_length] > maxD1) || (referenceBPs2[my_iindx[1] - seq_length] > maxD2))
    matrices->E_Fc_rem = MIN2(matrices->E_Fc_rem, 0);
  /* compute all E_Fc as elementwise minimum of the three closing cases */
  for (cnt1 = matrices->k_min_FcH; cnt1 <= matrices->k_max_FcH; cnt1++)
    for (cnt2 = matrices->l_min_FcH[cnt1]; cnt2 <= matrices->l_max_FcH[cnt1]; cnt2 += 2) {
      matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2],
                                            matrices->E_FcH[cnt1][cnt2 / 2]
                                            );
      updatePosteriorBoundaries(cnt1,
                                cnt2,
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  for (cnt1 = matrices->k_min_FcI; cnt1 <= matrices->k_max_FcI; cnt1++)
    for (cnt2 = matrices->l_min_FcI[cnt1]; cnt2 <= matrices->l_max_FcI[cnt1]; cnt2 += 2) {
      matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2],
                                            matrices->E_FcI[cnt1][cnt2 / 2]
                                            );
      updatePosteriorBoundaries(cnt1,
                                cnt2,
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  for (cnt1 = matrices->k_min_FcM; cnt1 <= matrices->k_max_FcM; cnt1++)
    for (cnt2 = matrices->l_min_FcM[cnt1]; cnt2 <= matrices->l_max_FcM[cnt1]; cnt2 += 2) {
      matrices->E_Fc[cnt1][cnt2 / 2] = MIN2(matrices->E_Fc[cnt1][cnt2 / 2],
                                            matrices->E_FcM[cnt1][cnt2 / 2]
                                            );
      updatePosteriorBoundaries(cnt1,
                                cnt2,
                                &min_k_real,
                                &max_k_real,
                                &min_l_real,
                                &max_l_real
                                );
    }
  /* add the case were structure is unfolded chain */
  matrices->E_Fc[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2] = MIN2(matrices->E_Fc[referenceBPs1[my_iindx[1] - seq_length]][referenceBPs2[my_iindx[1] - seq_length] / 2],
                                                                                                              0);
  updatePosteriorBoundaries(referenceBPs1[my_iindx[1] - seq_length],
                            referenceBPs2[my_iindx[1] - seq_length],
                            &min_k_real,
                            &max_k_real,
                            &min_l_real,
                            &max_l_real
                            );
  adjustArrayBoundaries(&matrices->E_Fc,
                        &matrices->k_min_Fc,
                        &matrices->k_max_Fc,
                        &matrices->l_min_Fc,
                        &matrices->l_max_Fc,
                        min_k_real,
                        max_k_real,
                        min_l_real,
                        max_l_real
                        );
}
PRIVATE void
adjustArrayBoundaries(int ***array,
                      int *k_min,
                      int *k_max,
                      int **l_min,
                      int **l_max,
                      int k_min_post,
                      int k_max_post,
                      int *l_min_post,
                      int *l_max_post)
{
  /*
   * Shrink a (k, l)-indexed energy matrix to the boundaries that were
   * actually touched during the fill phase ("posterior" boundaries).
   *
   * The matrix and its l_min/l_max boundary arrays use offset pointers:
   * row k is addressed directly as (*array)[k] and column l as [l/2], so
   * every realloc/free below first rewinds the pointer by the old offset.
   *
   * k_min_post == INF signals that no cell was ever written (see
   * preparePosteriorBoundaries); in that case the whole matrix is freed.
   *
   * Ownership note: l_min_post/l_max_post (allocated by
   * preparePosteriorBoundaries with the *old* k_min offset) are consumed
   * and freed here.
   */
  int cnt1;
  int k_diff_pre = k_min_post - *k_min;          /* rows dropped at the front */
  int mem_size = k_max_post - k_min_post + 1;    /* surviving row count */
  if (k_min_post < INF) {
    /* free all the unused memory behind actual data */
    for (cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;      /* rewind column offset */
      free((*array)[cnt1]);
    }
    /* free unused memory before actual data */
    for (cnt1 = *k_min; cnt1 < k_min_post; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;
      free((*array)[cnt1]);
    }
    /* move data to front and thereby eliminating unused memory in front of actual data */
    if (k_diff_pre > 0) {
      memmove((int **)(*array), ((int **)(*array)) + k_diff_pre, sizeof(int *) * mem_size);
      memmove((int *)(*l_min), ((int *)(*l_min)) + k_diff_pre, sizeof(int) * mem_size);
      memmove((int *)(*l_max), ((int *)(*l_max)) + k_diff_pre, sizeof(int) * mem_size);
    }
    /* reallocating memory to actual size used */
    *array += *k_min;                            /* rewind old row offset */
    *array = (int **)realloc(*array, sizeof(int *) * mem_size);
    *array -= k_min_post;                        /* apply new row offset */
    *l_min += *k_min;
    *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size);
    *l_min -= k_min_post;
    *l_max += *k_min;
    *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size);
    *l_max -= k_min_post;
    /* adjust l dimension of array */
    for (cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++) {
      if (l_min_post[cnt1] < INF) {
        /* new memsize */
        mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1) / 2 + 1;
        /* reshift the pointer */
        (*array)[cnt1] += (*l_min)[cnt1] / 2;
        /* parity correction: cell l lives at index l/2, so an odd/even
         * mismatch between old and new l_min shifts the start by one */
        int shift = (l_min_post[cnt1] % 2 == (*l_min)[cnt1] % 2) ? 0 : 1;
        /* eliminate unused memory in front of actual data */
        unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1]) / 2 + shift;
        if (start > 0)
          memmove((int *)((*array)[cnt1]), (int *)((*array)[cnt1]) + start, sizeof(int) * mem_size);
        (*array)[cnt1] = (int *)realloc((*array)[cnt1], sizeof(int) * mem_size);
        (*array)[cnt1] -= l_min_post[cnt1] / 2;
      } else {
        /* row never written: free according memory */
        (*array)[cnt1] += (*l_min)[cnt1] / 2;
        free((*array)[cnt1]);
      }
      (*l_min)[cnt1] = l_min_post[cnt1];
      (*l_max)[cnt1] = l_max_post[cnt1];
    }
  } else {
    /* nothing was stored at all: we have to free all unused memory */
    for (cnt1 = *k_min; cnt1 <= *k_max; cnt1++) {
      (*array)[cnt1] += (*l_min)[cnt1] / 2;
      free((*array)[cnt1]);
    }
    (*l_min) += *k_min;
    (*l_max) += *k_min;
    free(*l_min);
    free(*l_max);
    (*array) += *k_min;
    free(*array);
    *array = NULL;
  }
  /* consume the posterior boundary arrays (they carry the old k_min offset) */
  l_min_post += *k_min;
  l_max_post += *k_min;
  free(l_min_post);
  free(l_max_post);
  *k_min = k_min_post;
  *k_max = k_max_post;
}
PRIVATE INLINE void
preparePosteriorBoundaries(int size,
                           int shift,
                           int *min_k,
                           int *max_k,
                           int **min_l,
                           int **max_l)
{
  /* Set up "posterior" boundary trackers for a (k, l) matrix fill:
   * min trackers start at INF and max trackers at 0, so the first
   * updatePosteriorBoundaries() call establishes real bounds.
   * The per-k arrays are offset by `shift` so they can be indexed
   * directly with k values in [shift, shift + size). */
  int k;

  *min_k = INF;
  *max_k = 0;
  *min_l = ((int *)vrna_alloc(sizeof(int) * size)) - shift;
  *max_l = ((int *)vrna_alloc(sizeof(int) * size)) - shift;
  for (k = shift; k < shift + size; k++) {
    (*min_l)[k] = INF;
    (*max_l)[k] = 0;
  }
}
PRIVATE INLINE void
updatePosteriorBoundaries(int d1,
                          int d2,
                          int *min_k,
                          int *max_k,
                          int **min_l,
                          int **max_l)
{
  /* Record that cell (d1, d2) was written: widen the tracked k range and
   * the per-k l range accordingly. */
  if (d2 < (*min_l)[d1])
    (*min_l)[d1] = d2;

  if (d2 > (*max_l)[d1])
    (*max_l)[d1] = d2;

  if (d1 < *min_k)
    *min_k = d1;

  if (d1 > *max_k)
    *max_k = d1;
}
INLINE PRIVATE void
prepareBoundaries(int min_k_pre,
                  int max_k_pre,
                  int min_l_pre,
                  int max_l_pre,
                  int bpdist,
                  int *min_k,
                  int *max_k,
                  int **min_l,
                  int **max_l)
{
  /* Allocate and initialize the per-k l-boundary arrays for a (k, l)
   * matrix spanning k in [min_k_pre, max_k_pre]. For each k the smallest
   * admissible l is raised until k + l >= bpdist and until k + l has the
   * same parity as bpdist. The arrays are offset by min_k_pre so they can
   * be indexed directly with k. */
  int k, lo;
  int rows = max_k_pre - min_k_pre + 1;

  *min_k = min_k_pre;
  *max_k = max_k_pre;
  *min_l = ((int *)vrna_alloc(sizeof(int) * rows)) - min_k_pre;
  *max_l = ((int *)vrna_alloc(sizeof(int) * rows)) - min_k_pre;

  /* for each k guess the according minimum l */
  for (k = min_k_pre; k <= max_k_pre; k++) {
    lo = min_l_pre;
    while (lo + k < bpdist)
      lo++;
    if ((bpdist % 2) != ((lo + k) % 2))
      lo++;  /* fix parity: k + l must match bpdist mod 2 */
    (*min_l)[k] = lo;
    (*max_l)[k] = max_l_pre;
  }
}
INLINE PRIVATE void
prepareArray(int ***array,
             int min_k,
             int max_k,
             int *min_l,
             int *max_l)
{
  /* Allocate an offset-indexed (k, l) energy matrix and fill it with INF.
   * Row k is addressed as (*array)[k]; cell l lives at index l/2 because
   * l has a fixed parity per row, so only every second value is stored. */
  int k, idx, cells;
  int *row;

  *array = ((int **)vrna_alloc(sizeof(int *) * (max_k - min_k + 1))) - min_k;
  for (k = min_k; k <= max_k; k++) {
    cells = (max_l[k] - min_l[k] + 1) / 2 + 1;
    row = (int *)vrna_alloc(sizeof(int) * cells);
    for (idx = 0; idx < cells; idx++)
      row[idx] = INF;
    (*array)[k] = row - min_l[k] / 2;  /* offset so row[l/2] works directly */
  }
}
INLINE PRIVATE void
prepareArray2(unsigned long ***array,
              int min_k,
              int max_k,
              int *min_l,
              int *max_l)
{
  /* Allocate an offset-indexed (k, l) counter matrix (same layout as
   * prepareArray, but unsigned long cells and no INF initialization —
   * vrna_alloc'd memory is used as-is). */
  int k, cells;

  *array = ((unsigned long **)vrna_alloc(sizeof(unsigned long *) * (max_k - min_k + 1))) - min_k;
  for (k = min_k; k <= max_k; k++) {
    cells = (max_l[k] - min_l[k] + 1) / 2 + 1;
    (*array)[k] = ((unsigned long *)vrna_alloc(sizeof(unsigned long) * cells)) - min_l[k] / 2;
  }
}
/*
#################################
# OLD API support #
#################################
*/
/* crosslink data from vars->compatibility to TwoDfold_vars structure */
PRIVATE INLINE void
crosslink(TwoDfold_vars *vars)
{
  /* Mirror the state of the modern vrna_fold_compound_t (and its MFE
   * matrices) into the legacy TwoDfold_vars struct so old-API callers
   * keep seeing consistent data. Pure field copies; no ownership changes. */
  vrna_fold_compound_t *fc = vars->compatibility;
  vrna_mx_mfe_t        *mx = fc->matrices;

  /* sequence, references and model settings */
  vars->sequence         = fc->sequence;
  vars->seq_length       = fc->length;
  vars->reference_pt1    = fc->reference_pt1;
  vars->reference_pt2    = fc->reference_pt2;
  vars->referenceBPs1    = fc->referenceBPs1;
  vars->referenceBPs2    = fc->referenceBPs2;
  vars->bpdist           = fc->bpdist;
  vars->do_backtrack     = 1;
  vars->dangles          = fc->params->model_details.dangles;
  vars->circ             = fc->params->model_details.circ;
  vars->temperature      = fc->params->model_details.temperature;
  vars->ptype            = fc->ptype_pf_compat;
  vars->P                = fc->params;
  vars->S                = fc->sequence_encoding2;
  vars->S1               = fc->sequence_encoding;
  vars->my_iindx         = fc->iindx;
  vars->mm1              = fc->mm1;
  vars->mm2              = fc->mm2;
  vars->maxD1            = fc->maxD1;
  vars->maxD2            = fc->maxD2;

  /* distance-class matrices and their k/l boundaries */
  vars->E_C              = mx->E_C;
  vars->l_min_values     = mx->l_min_C;
  vars->l_max_values     = mx->l_max_C;
  vars->k_min_values     = mx->k_min_C;
  vars->k_max_values     = mx->k_max_C;
  vars->E_F5             = mx->E_F5;
  vars->l_min_values_f   = mx->l_min_F5;
  vars->l_max_values_f   = mx->l_max_F5;
  vars->k_min_values_f   = mx->k_min_F5;
  vars->k_max_values_f   = mx->k_max_F5;
  vars->E_F3             = mx->E_F3;
  vars->l_min_values_f3  = mx->l_min_F3;
  vars->l_max_values_f3  = mx->l_max_F3;
  vars->k_min_values_f3  = mx->k_min_F3;
  vars->k_max_values_f3  = mx->k_max_F3;
  vars->E_M              = mx->E_M;
  vars->l_min_values_m   = mx->l_min_M;
  vars->l_max_values_m   = mx->l_max_M;
  vars->k_min_values_m   = mx->k_min_M;
  vars->k_max_values_m   = mx->k_max_M;
  vars->E_M1             = mx->E_M1;
  vars->l_min_values_m1  = mx->l_min_M1;
  vars->l_max_values_m1  = mx->l_max_M1;
  vars->k_min_values_m1  = mx->k_min_M1;
  vars->k_max_values_m1  = mx->k_max_M1;
#ifdef COUNT_STATES
  vars->N_C              = mx->N_C;
  vars->N_F5             = mx->N_F5;
  vars->N_M              = mx->N_M;
  vars->N_M1             = mx->N_M1;
#endif
  vars->E_M2_rem         = mx->E_M2_rem;
  vars->E_M2             = mx->E_M2;
  vars->l_min_values_m2  = mx->l_min_M2;
  vars->l_max_values_m2  = mx->l_max_M2;
  vars->k_min_values_m2  = mx->k_min_M2;
  vars->k_max_values_m2  = mx->k_max_M2;

  /* circular-fold matrices and remainder classes */
  vars->E_Fc             = mx->E_Fc;
  vars->E_FcH            = mx->E_FcH;
  vars->E_FcI            = mx->E_FcI;
  vars->E_FcM            = mx->E_FcM;
  vars->E_Fc_rem         = mx->E_Fc_rem;
  vars->E_FcH_rem        = mx->E_FcH_rem;
  vars->E_FcI_rem        = mx->E_FcI_rem;
  vars->E_FcM_rem        = mx->E_FcM_rem;
  vars->E_C_rem          = mx->E_C_rem;
  vars->E_M_rem          = mx->E_M_rem;
  vars->E_M1_rem         = mx->E_M1_rem;
  vars->E_F5_rem         = mx->E_F5_rem;
}
PUBLIC TwoDfold_vars *
get_TwoDfold_variables(const char *seq,
                       const char *structure1,
                       const char *structure2,
                       int circ)
{
  /* Legacy constructor: wrap a modern vrna_fold_compound_t (built with the
   * default model, optionally circular) in a TwoDfold_vars shim.
   * Caller owns the result and frees it via destroy_TwoDfold_variables(). */
  TwoDfold_vars *vars = (TwoDfold_vars *)vrna_alloc(sizeof(TwoDfold_vars));
  vrna_md_t     md;

  set_model_details(&md);
  md.circ = circ;

  vars->compatibility = vrna_fold_compound_TwoD(seq, structure1, structure2, &md, VRNA_OPTION_MFE);
  crosslink(vars);

  return vars;
}
PUBLIC char *
TwoDfold_backtrack_f5(unsigned int j,
                      int k,
                      int l,
                      TwoDfold_vars *vars)
{
  /* Legacy wrapper: backtrack the structure for distance class (k, l)
   * within the first j nucleotides via the modern API. */
  char *structure;

  structure = vrna_backtrack5_TwoD(vars->compatibility, k, l, j);

  return structure;
}
PUBLIC void
destroy_TwoDfold_variables(TwoDfold_vars *vars)
{
  /* Legacy destructor: release the wrapped fold compound and the shim
   * itself. Safe to call with NULL. */
  if (vars) {
    vrna_fold_compound_free(vars->compatibility);
    free(vars);
  }
}
PUBLIC vrna_sol_TwoD_t *
TwoDfoldList(TwoDfold_vars *vars,
             int distance1,
             int distance2)
{
  /* Legacy wrapper around vrna_mfe_TwoD(): compute the 2D MFE landscape up
   * to the given reference distances, then re-sync the shim fields since
   * the computation (re)allocates matrices inside the fold compound. */
  vrna_sol_TwoD_t *solutions = vrna_mfe_TwoD(vars->compatibility, distance1, distance2);

  crosslink(vars);

  return solutions;
}
PUBLIC void
update_TwoDfold_params(TwoDfold_vars *vars)
{
  /* Legacy wrapper: rebuild the energy parameter set from the current
   * default model details and re-sync the shim fields. */
  vrna_md_t model_settings;

  set_model_details(&model_settings);

  /* swap in freshly scaled parameters, releasing the old set */
  free(vars->compatibility->params);
  vars->compatibility->params = vrna_params(&model_settings);

  crosslink(vars);
}
|
GB_binop__first_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int8)
// A*D function (colscale): GB (_AxD__first_int8)
// D*A function (rowscale): GB (_DxB__first_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; for the FIRST operator the
// result is cij = aij (B values are ignored; B is treated as a pattern).
void GB (_Cdense_ewise3_noaccum__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads    // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// FIRST operator. The template is excluded here (#if 0), so this variant
// is a stub that reports success without touching C — presumably because
// z = FIRST(cij, bij) = cij makes the accumulation a no-op; NOTE(review):
// confirm against the generator.
GrB_Info GB (_Cdense_accumB__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the FIRST
// operator. Like the matrix-accumulate case above, the template is
// excluded (#if 0), so this stub returns success without modifying C.
GrB_Info GB (_Cdense_accumb__first_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,   // the scalar b, as an untyped pointer
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// FIRST operator. Returns GrB_NO_VALUE when this kernel is disabled at
// compile time (GB_DISABLE), so the caller falls back to the generic path.
GrB_Info GB (_AxD__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are computed here
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// FIRST operator. Returns GrB_NO_VALUE when disabled (generic fallback).
GrB_Info GB (_DxB__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are computed here
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B, C<M> = A+B, or C<!M> = A+B with the
// FIRST operator. For eWiseUnion, alpha/beta scalars substitute for
// missing entries of A and B respectively.
GrB_Info GB (_AaddB__first_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,              // mask (may be NULL)
    const bool Mask_struct,          // use only the structure of M
    const bool Mask_comp,            // use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,        // union semantics (with fill scalars)
    const GB_void *alpha_scalar_in,  // fill value for missing A entries
    const GB_void *beta_scalar_in,   // fill value for missing B entries
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // typed copies of the fill scalars, only valid for eWiseUnion
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B with the
// FIRST operator, where C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__first_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,              // mask (may be NULL)
    const bool Mask_struct,          // use only the structure of M
    const bool Mask_comp,            // use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated eWiseMult (method 02) kernel for FIRST_INT8.  GB_BINOP_FLIP
// selects at compile time whether flipxy must be handled at run time.
GrB_Info GB (_AemultB_02__first_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated eWiseMult (method 04) kernel for FIRST_INT8; work is sliced
// over M's entries (M_ek_slicing), as the parameter list shows.
GrB_Info GB (_AemultB_04__first_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated bitmap eWiseMult kernel for FIRST_INT8.
GrB_Info GB (_AemultB_bitmap__first_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0) and the generator named it "(none)".
// The body shows why: with this operator the result ignores Bx entirely and
// the loop degenerates to Cx [p] = x (note the empty ";" statements where
// the aij load would normally go).
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Compiled out (#if 0), named "(none)" by the generator.  With this operator
// the scalar y is ignored and the loop reduces to a copy: Cx [p] = aij.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Compiled out (#if 0).  GB_CAST_OP is what GB_unop_transpose.c invokes per
// entry; here it ignores the source value and stores the bound scalar x.
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this kernel
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Compiled out (#if 0).  Here GB_CAST_OP ignores the bound scalar y and the
// transpose reduces to a straight copy of aij.
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
/*
Typedef declarations.
*/
/* Deprecated pixel-view container: exposes a rectangular image region as rows
   of pixel wands, with one row of wands per worker thread. */
struct _PixelView
{
size_t
id;  /* unique wand id, from AcquireWandId() */
char
name[MaxTextExtent];  /* "PixelView-<id>", used for logging/validation */
ExceptionInfo
*exception;  /* owned; destroyed by DestroyPixelView() */
MagickWand
*wand;  /* associated magick wand (not owned) */
CacheView
*view;  /* owned cache view over the wand's image */
RectangleInfo
region;  /* region of interest; width = wands per row */
size_t
number_threads;  /* number of pixel_wands rows */
PixelWand
***pixel_wands;  /* [number_threads][region.width] array of wands */
MagickBooleanType
debug;  /* when true, log wand events */
size_t
signature;  /* WandSignature while valid; inverted on destroy */
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w A l l o c a t e W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAllocateWand() allocates an initial drawing wand which is an opaque
% handle required by the remaining drawing methods.
%
% The format of the DrawAllocateWand method is:
%
% DrawingWand DrawAllocateWand(const DrawInfo *draw_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: Initial drawing defaults. Set to NULL to use defaults.
%
% o image: the image to draw on.
%
*/
WandExport DrawingWand *DrawAllocateWand(const DrawInfo *draw_info,Image *image)
{
/* Deprecated thin wrapper: forwards directly to AcquireDrawingWand(). */
return(AcquireDrawingWand(draw_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() average a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/* Build a new MagickWand that shares the given image list: image_info and
   quantize_info are cloned from the source wand, but `images` is stored
   directly (no clone) — the new wand presumably takes ownership of the list;
   TODO confirm against the wand destructor. */
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
Image *images)
{
MagickWand
*clone_wand;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
if (clone_wand == (MagickWand *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
images->filename);
/* Zero the struct first so every unset field is in a known state. */
(void) memset(clone_wand,0,sizeof(*clone_wand));
clone_wand->id=AcquireWandId();
(void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
MagickWandId,(double) clone_wand->id);
clone_wand->exception=AcquireExceptionInfo();
InheritException(clone_wand->exception,wand->exception);
clone_wand->image_info=CloneImageInfo(wand->image_info);
clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
clone_wand->images=images;
clone_wand->debug=IsEventLogging();
if (clone_wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
/* Mark the wand valid only after it is fully constructed. */
clone_wand->signature=WandSignature;
return(clone_wand);
}
/* Deprecated: average the wand's image sequence via EvaluateImages() with the
   mean operator, returning the result in a freshly cloned wand, or NULL on
   failure (no images, or evaluation error). */
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
Image
*average_image;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
if (wand->images == (Image *) NULL)
return((MagickWand *) NULL);
average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
wand->exception);
if (average_image == (Image *) NULL)
return((MagickWand *) NULL);
return(CloneMagickWandFromImages(wand,average_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/*
  Deep-copy a pixel view: the cache view and the per-thread rows of pixel
  wands are duplicated; the magick wand pointer is shared (not owned).

  Bug fixes versus the previous revision:
    1. The per-thread row table `pixel_wands` was never allocated: memset()
       left it NULL and the clone loop then wrote through a null pointer.
    2. `clone_view->wand` was left NULL, although the view iterators
       dereference view->wand->images.
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireeExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  clone_view->wand=pixel_view->wand;
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    Allocate the row table before cloning each thread's row of wands.
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) memset(clone_view->pixel_wands,0,pixel_view->number_threads*
    sizeof(*clone_view->pixel_wands));
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  /*
    Mark the view valid only after it is fully constructed.
  */
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view,
% const size_t number_wands,const size_t number_threads)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o number_wand: the number of pixel wands.
%
% o number_threads: number of threads.
%
*/
/*
  Release every per-thread row of pixel wands, then the row table itself;
  always returns NULL-equivalent (the relinquished pointer).
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    thread;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    if (pixel_wands[thread] == (PixelWand **) NULL)
      continue;
    pixel_wands[thread]=DestroyPixelWands(pixel_wands[thread],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/* Tear a pixel view down: wand rows, cache view, exception, then the struct
   itself.  Returns the relinquished pointer (presumably NULL per ImageMagick
   convention — confirm against RelinquishMagickMemory). */
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
/* region.width is the per-row wand count (one wand per column). */
pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
pixel_view->region.width,pixel_view->number_threads);
pixel_view->view=DestroyCacheView(pixel_view->view);
pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
/* Poison the signature so stale pointers fail the validity asserts. */
pixel_view->signature=(~WandSignature);
RelinquishWandId(pixel_view->id);
pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel region is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination pixel view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate over three pixel views in parallel, one scanline at a time: load the
  source and duplex rows into their pixel wands, load the destination row,
  invoke the user transfer callback, then write the (possibly modified)
  destination wands back and sync.

  Bug fixes versus the previous revision:
    1. On a destination sync failure, the exception was inherited from
       source->view even though the failing view is destination->view.
    2. duplex and destination were dereferenced without the NULL/signature
       asserts applied to source.

  NOTE(review): the scanline loop runs y from region.y to region.height-1
  (not region.y+region.height-1); this matches the companion
  GetPixelViewIterator — confirm it is the intended upstream behavior.
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  assert(duplex != (PixelView *) NULL);
  assert(duplex->signature == WandSignature);
  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /*
    The destination is written to, so it must be DirectClass.
  */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict duplex_indexes,
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register IndexPacket
      *magick_restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict destination_pixels;

    /* Once any row fails, the remaining iterations become no-ops. */
    if (status == MagickFalse)
      continue;
    /*
      Load the source row into this thread's pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Load the duplex row.
    */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /*
      Load the destination row (authentic pixels: it will be written back).
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /*
      Invoke the user callback, then copy its result back into the row.
    */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          The sync failed on the destination view, so inherit its exception
          (previously this incorrectly read source->view).
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
% char *GetPixelViewException(const PixelWand *pixel_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel pixel_view.
%
% o severity: the severity of the error is returned here.
%
*/
/* Format the view's pending exception as "<reason> (<description>)" into a
   freshly allocated string; the severity is returned through *severity.  The
   caller is responsible for freeing the returned buffer (presumably with the
   usual ImageMagick memory routines — confirm against callers). */
WandExport char *GetPixelViewException(const PixelView *pixel_view,
ExceptionType *severity)
{
char
*description;
assert(pixel_view != (const PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
if (pixel_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
assert(severity != (ExceptionType *) NULL);
*severity=pixel_view->exception->severity;
/* 2*MaxTextExtent leaves room for both reason and description. */
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
pixel_view->name);
*description='\0';
if (pixel_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->reason),
MaxTextExtent);
if (pixel_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
pixel_view->exception->severity,pixel_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: height (in rows) of the view's region. */
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/* Read-only iteration: for each scanline of the view (parallelized with
   OpenMP), load the row into the calling thread's pixel wands and invoke the
   user `get` callback; writes to the wands are never synced back.
   NOTE(review): the loop runs y from region.y to region.height-1, matching
   DuplexTransferPixelViewIterator — confirm intended. */
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (get == (GetPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*indexes;
register const PixelPacket
*pixels;
register ssize_t
x;
/* After any failure the remaining iterations become no-ops. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
/* Copy the row into this thread's wands for the callback to inspect. */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (get(source,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,GetPixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel view pixel_wands.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Return the row of pixel wands belonging to the calling OpenMP thread;
   intended for use inside the view-iterator callbacks. */
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
const int
id = GetOpenMPThreadId();
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: the magick wand this view was created from (not a copy). */
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: width (in columns) of the view's region. */
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: x offset of the view's region. */
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: y offset of the view's region. */
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
assert(pixel_view != (PixelView *) NULL);
assert(pixel_view->signature == WandSignature);
return(pixel_view->region.y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPixelView() returns MagickTrue if the parameter is verified as a pixel
% view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/*
  Validate that the pointer refers to a live PixelView: non-NULL, carrying
  the wand signature, and named with the "PixelView" prefix.
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  if ((pixel_view == (const PixelView *) NULL) ||
      (pixel_view->signature != WandSignature))
    return(MagickFalse);
  if (LocaleNCompare(pixel_view->name,PixelViewId,strlen(PixelViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. Id may be a number
% if preceded with #, to work on a numbered path, e.g., "#1" to use the first
% path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
const char *pathname,const MagickBooleanType inside)
{
/* Deprecated thin wrapper: forwards to MagickClipImagePath(). */
return(MagickClipImagePath(wand,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  double
    alpha;

  /* Deprecated alias: delegate to DrawGetFillOpacity(). */
  alpha=DrawGetFillOpacity(wand);
  return(alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  double
    alpha;

  /* Deprecated alias: delegate to DrawGetStrokeOpacity(). */
  alpha=DrawGetStrokeOpacity(wand);
  return(alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the DrawPeekGraphicWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  DrawInfo
    *draw_info;

  /* Deprecated alias: delegate to PeekDrawingWand(). */
  draw_info=PeekDrawingWand(wand);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  MagickBooleanType
    status;

  /* Deprecated alias: pop via PopDrawingWand(); its status is discarded. */
  status=PopDrawingWand(wand);
  (void) status;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  MagickBooleanType
    status;

  /* Deprecated alias: push via PushDrawingWand(); its status is discarded. */
  status=PushDrawingWand(wand);
  (void) status;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
/* Deprecated alias: forwards fill_alpha (1.0 is fully opaque) to
   DrawSetFillOpacity(). */
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
/* Deprecated alias: forwards stroke_alpha (1.0 is fully opaque) to
   DrawSetStrokeOpacity(). */
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;
  MagickBooleanType
    status;
  PixelPacket
    target;
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Carry the fill color to ColorFloodfillImage() via a transient DrawInfo. */
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /* Sample the target color at the seed point; coordinates are reduced
     modulo the image extent.  NOTE(review): negative x/y mixed with the
     unsigned columns/rows in % looks suspect -- confirm intended. */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  /* A border color, when supplied, replaces the sampled target and switches
     the fill to run until the border color is met (FillToBorderMethod). */
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  /* Surface any image-level exception on the wand. */
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by printing its attributes to the
% file. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
%      char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  char
    *description;

  /* Deprecated alias: delegate to MagickIdentifyImage(); the caller frees
     the returned string with MagickRelinquishMemory(). */
  description=MagickIdentifyImage(wand);
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickFlattenImages() merges a sequence of images. This useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  Image
    *merged_image;

  /*
    Deprecated: merge the image sequence with FlattenImages() and wrap the
    result in a new wand; NULL is returned for an empty wand or on failure
    (the reason is recorded in wand->exception).
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  merged_image=FlattenImages(wand->images,wand->exception);
  if (merged_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,merged_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  char
    *value;

  /* Deprecated alias: delegate to MagickGetImageProperty(); the caller frees
     the returned string with MagickRelinquishMemory(). */
  value=MagickGetImageProperty(wand,property);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k G e t I m a g e I n d e x                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  ssize_t
    index;

  /* Deprecated alias: delegate to MagickGetIteratorIndex(). */
  index=MagickGetIteratorIndex(wand);
  return(index);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  /*
    Deprecated: report the per-channel minimum/maximum pixel values of the
    current image through GetImageChannelExtrema().
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k G e t I m a g e E x t r e m a                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  /*
    Deprecated: report the minimum/maximum pixel values of the current image
    through GetImageExtrema().
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetImageExtrema(wand->images,minima,maxima,wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/* Deprecated: report whether the current image has a matte channel by
   returning the image's matte member; throws when the wand holds no image. */
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(wand->images->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  MagickBooleanType
    status;

  /* Deprecated alias: delegate to MagickExportImagePixels(). */
  status=MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  MagickSizeType
    length;

  /*
    Deprecated: report the current image's blob length in bytes via
    GetBlobSize().
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  length=GetBlobSize(wand->images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
/* Deprecated: recolor the current image with the closest colors from the
   map wand's image via MapImage(); dither when requested. */
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Both the target wand and the map wand must hold at least one image. */
  if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  /* Surface any image-level exception on the wand. */
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;
  PixelPacket
    target;
  /*
    Deprecated: flood-fill the opacity channel starting at (x,y).  The target
    color is sampled at the seed point unless a border color is supplied, in
    which case filling proceeds until the border color is met.
    Fix: the original cloned a DrawInfo that was never referenced before
    being destroyed; the pointless allocation has been removed.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* Convert alpha (1.0 fully opaque) to a quantum opacity value. */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  /* Surface any image-level exception on the wand. */
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *filtered_image;

  /*
    Deprecated: replace each pixel with the median of its neighborhood of
    the given radius, substituting the filtered image into the list.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (filtered_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,filtered_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *result_image;

  /*
    Deprecated: evaluate the sequence with MinEvaluateOperator and wrap the
    per-pixel minimum image in a new wand; NULL on failure or empty wand.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  result_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (result_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,result_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominant color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *filtered_image;

  /*
    Deprecated: replace each pixel with the predominant color of its
    neighborhood of the given radius, substituting the result into the list.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered_image=ModeImage(wand->images,radius,wand->exception);
  if (filtered_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,filtered_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *composite_image;

  /*
    Deprecated: inlay the image sequence with MosaicImages() and wrap the
    resulting canvas in a new wand; NULL on failure or empty wand.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  composite_image=MosaicImages(wand->images,wand->exception);
  if (composite_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,composite_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated alias: delegate to MagickPaintOpaqueImage(). */
  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /* Deprecated alias: delegate to MagickFloodfillPaintImage() with
     invert=MagickFalse. */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated alias: paint all default channels. */
  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /* Deprecated alias: delegate to MagickOpaquePaintImageChannel() with
     invert=MagickFalse. */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /* Deprecated alias: delegate to MagickTransparentPaintImage() with
     invert=MagickFalse. */
  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R a d i a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRadialBlurImage() radial blurs an image.
%
% The format of the MagickRadialBlurImage method is:
%
% MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
% const double angle)
% MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
% const ChannelType channel,const double angle)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o angle: the angle of the blur in degrees.
%
*/
WandExport MagickBooleanType MagickRadialBlurImage(MagickWand *wand,
  const double angle)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: the radial blur was renamed to rotational blur.
  */
  status=MagickRotationalBlurImage(wand,angle);
  return(status);
}
WandExport MagickBooleanType MagickRadialBlurImageChannel(MagickWand *wand,
  const ChannelType channel,const double angle)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to the rotational-blur channel method.
  */
  status=MagickRotationalBlurImageChannel(wand,channel,angle);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() apply color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *recolor_image;

  /*
    Validate the wand and log the call when debugging is enabled.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Apply the color matrix; a NULL result signals failure.
  */
  recolor_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (recolor_image == (Image *) NULL)
    return(MagickFalse);
  /*
    Swap the transformed image into the wand's image list.
  */
  ReplaceImageInList(&wand->images,recolor_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *despeckled_image;

  /*
    Validate the wand and log the call when debugging is enabled.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Smooth contours while preserving edges; NULL signals failure.
  */
  despeckled_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (despeckled_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,despeckled_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *max_image;

  /*
    Validate the wand and log the call when debugging is enabled.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /*
    Pixel-wise maximum across the image sequence.
  */
  max_image=EvaluateImages(wand->images,MaxEvaluateOperator,wand->exception);
  if (max_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,max_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
  const char *property,const char *value)
{
  /*
    Deprecated wrapper around SetImageProperty().

    Fix: every sibling deprecated method validates the wand and guards
    against an empty image list; this one passed wand->images (possibly
    NULL) straight through, crashing instead of reporting an error.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() set the current image to the position of the list
% specified with the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
  const ssize_t index)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to MagickSetIteratorIndex().
  */
  status=MagickSetIteratorIndex(wand,index);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageOption() associates one or more options with a particular
% image format (e.g. MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    define[MaxTextExtent];

  /*
    Validate the wand and log the call when debugging is enabled.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /*
    Compose the "format:key=value" string DefineImageOption() expects.
  */
  (void) FormatLocaleString(define,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,define));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to MagickPaintTransparentImage().
  */
  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  /*
    Deprecated wrapper: a region of interest is just an image region.
  */
  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImagePixels() accepts pixel data and stores it in the image at the
% location you specify. The method returns MagickTrue on success otherwise
% MagickFalse if an error is encountered. The pixel data can be either char,
% short int, int, ssize_t, float, or double in the order specified by map.
%
% Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  MagickBooleanType
    status;

  /*
    Deprecated wrapper: forward to MagickImportImagePixels().
  */
  status=MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  /*
    Deprecated wrapper: forward to MagickGetImageBlob(); the caller frees
    the blob with MagickRelinquishMemory().
  */
  blob=MagickGetImageBlob(wand,length);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
/*
  Allocate one row of number_wands pixel wands per worker thread.  Returns
  NULL on allocation failure; partially built sets are torn down via
  DestroyPixelsThreadSet().
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    n;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the table so a partial failure can be cleaned up safely. */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_wands[n]=NewPixelWands(number_wands);
    if (pixel_wands[n] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}
/*
  Allocate and initialize a pixel view spanning the wand's entire first
  image.  Allocation failures raise a fatal wand exception.
*/
WandExport PixelView *NewPixelView(MagickWand *wand)
{
PixelView
*pixel_view;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == MagickCoreSignature);
/* Allocate the view structure and zero every field. */
pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
if (pixel_view == (PixelView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) memset(pixel_view,0,sizeof(*pixel_view));
/* Assign a unique id and a printable name derived from it. */
pixel_view->id=AcquireWandId();
(void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
PixelViewId,(double) pixel_view->id);
pixel_view->exception=AcquireExceptionInfo();
pixel_view->wand=wand;
/* Read-only cache view over the wand's image list. */
pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
pixel_view->exception);
/* NOTE(review): wand->images is dereferenced without a NULL check below;
presumably callers guarantee the wand holds at least one image -- confirm. */
pixel_view->region.width=wand->images->columns;
pixel_view->region.height=wand->images->rows;
/* One row of pixel wands per OpenMP thread, each region.width wide. */
pixel_view->number_threads=GetOpenMPMaximumThreads();
pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
pixel_view->number_threads);
if (pixel_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
pixel_view->debug=IsEventLogging();
pixel_view->signature=WandSignature;
return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixel_wands view.
%
*/
/*
  NewPixelViewRegion() allocates a pixel view restricted to the given
  region of the wand's image.  Allocation failures raise a fatal wand
  exception.
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickCoreSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: the wand must be recorded BEFORE acquiring the cache view.
    Previously AcquireVirtualCacheView() read pixel_view->wand->images while
    pixel_view->wand was still NULL (the structure is zeroed above), a NULL
    pointer dereference.  NewPixelView() already uses this order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  /* One row of pixel wands per OpenMP thread, each region.width wide. */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
% PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    discarded_wands;

  /*
    Deprecated wrapper: delegate to PixelGetNextIteratorRow(); the row
    width it reports is discarded.
  */
  return(PixelGetNextIteratorRow(iterator,&discarded_wands));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
% char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  char
    *description;

  /*
    Deprecated wrapper: forward to PixelGetIteratorException().
  */
  description=PixelGetIteratorException(iterator,severity);
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the destination view row by row (in parallel when OpenMP is
  enabled), invoking the user's set callback for each scanline and syncing
  the wand contents back to the image.
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(destination != (PixelView *) NULL);
assert(destination->signature == WandSignature);
if (set == (SetPixelViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
/* Writing pixels requires a DirectClass image. */
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
/* Another row already failed: skip the remaining work on this row. */
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
y,destination->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(destination->view);
/* Per the API docs the pixels start undefined: the callback is expected
to fill destination->pixel_wands[id][0..width-1]. */
if (set(destination,context) == MagickFalse)
status=MagickFalse;
/* Copy the wand contents the callback produced into the pixel cache. */
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* NOTE(review): only the increment is atomic; SetImageProgress() then
reads progress unsynchronized -- presumably acceptable for a progress
estimate, but confirm against upstream (newer releases use a critical
section here). */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(destination_image,SetPixelViewTag,progress,
destination->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% region is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination pixel view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate two pixel views in parallel, scanline by scanline: read the
  source row into wands, let the user's transfer callback map source wands
  to destination wands, then sync the destination wands back to the image.
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/* NOTE(review): only the source view is validated; a NULL destination is
dereferenced below without an assert -- confirm callers guarantee it. */
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (TransferPixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
/* Writing destination pixels requires a DirectClass image. */
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict pixels;
register IndexPacket
*magick_restrict destination_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
/* Source rows are virtual: negative offsets/overruns are permitted. */
pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
source->region.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(source->view);
/* Seed the source wands from the source row. */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (source_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetIndex(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->region.x,y,destination->region.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
/* NOTE(review): the destination wands are seeded from the SOURCE row
(`pixels`/`indexes`) rather than destination_pixels/destination_indexes;
this looks like a copy-paste slip -- verify against upstream before
changing, as callbacks may rely on the current behavior. */
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetBlackQuantum(destination->pixel_wands[id][x],
GetPixelIndex(indexes+x));
if (destination_image->storage_class == PseudoClass)
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelSetIndex(destination->pixel_wands[id][x],
GetPixelIndex(indexes+x));
/* User callback: map source wands to destination wands. */
if (transfer(source,destination,context) == MagickFalse)
status=MagickFalse;
/* Copy the callback's destination wands into the pixel cache. */
for (x=0; x < (ssize_t) destination->region.width; x++)
PixelGetQuantumColor(destination->pixel_wands[id][x],
destination_pixels+x);
if (destination_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) destination->region.width; x++)
SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
destination->pixel_wands[id][x]));
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
/* NOTE(review): the sync failed on destination->view, yet the exception
is inherited from source->view -- likely the wrong view; confirm. */
InheritException(destination->exception,GetCacheViewException(
source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,TransferPixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source view row by row (in parallel when OpenMP is enabled):
  load each row into wands, invoke the user's update callback, then write
  the possibly-modified wands back to the image.
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(source != (PixelView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdatePixelViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
/* In-place pixel updates require a DirectClass image. */
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status)
#endif
for (y=source->region.y; y < (ssize_t) source->region.height; y++)
{
const int
id = GetOpenMPThreadId();
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
source->region.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(
source->view));
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(source->view);
/* Load the row into this thread's wands before the callback runs. */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
PixelSetBlackQuantum(source->pixel_wands[id][x],
GetPixelIndex(indexes+x));
/* User callback may modify the wands in place. */
if (update(source,context) == MagickFalse)
status=MagickFalse;
/* Write the (possibly updated) wands back into the pixel cache. */
for (x=0; x < (ssize_t) source->region.width; x++)
PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
if (source_image->colorspace == CMYKColorspace)
for (x=0; x < (ssize_t) source->region.width; x++)
SetPixelIndex(indexes+x,PixelGetBlackQuantum(
source->pixel_wands[id][x]));
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress,
source->region.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
#endif
|
AUC-openmp.c | // Program: AUC-openmp
// Author: Jason Regina
// Date: 12 November 2015
// Description: This program approximates pi using the Riemann Sum method
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>
// This function returns a y-value on a unit circle
// centered at the origin, given an x-value
double func(double x)
{
return sqrt(1.0 - (x * x));
}
// Approximate pi by a Riemann sum of 4*sqrt(1 - x^2) over [0, 1].
// Options: -n rectangle count, -t OpenMP thread count.  Returns 0 on
// success, -1 on a usage or argument error.
int main( int argc, char** argv )
{
    // Default number of rectangles and OMP threads
    int recs = 100000000;
    int num_threads = 1;

    // Parse command line
    const char* name = argv[0];
    int c;
    while ((c = getopt(argc, argv, "n:t:")) != -1)
    {
        switch(c)
        {
            case 'n':
                recs = atoi(optarg);
                break;
            case 't':
                num_threads = atoi(optarg);
                break;
            case '?':
            default:
                fprintf(stderr, "Usage: %s -n [NUMBER_OF_RECTANGLES] -t [OMP_NUM_THREADS]\n", name);
                return -1;
        }
    }
    // Bug fix: the original read `argc -+ optind;` (unary plus), which set
    // argc = -optind instead of discarding the parsed options.
    argc -= optind;
    argv += optind;

    // Reject non-positive counts: recs == 0 would divide by zero below and
    // atoi() returns 0 for non-numeric input.
    if (recs <= 0 || num_threads <= 0)
    {
        fprintf(stderr, "%s: rectangle and thread counts must be positive\n", name);
        return -1;
    }

    // Calculate rectangle width
    double width = 1.0 / recs;

    // Determine first and last elements of process
    int first = 0, last = recs;

    // Calculate total area: each term is 4 * f(x_i) * width, summing the
    // quarter-circle area scaled to pi
    double sum = 0.0;
    int i = 0;

    // Set OMP threads and reduce the partial sums across the team
    omp_set_num_threads(num_threads);
#pragma omp parallel for reduction(+:sum) shared(first,last,width) private(i)
    for (i = first; i < last; i++)
    {
        sum += func(width * i) * width * 4.0;
    }

    // Print result
    printf(" --- %s --- \n", name);
    printf("Number of processes: %d\n", 1);
    printf("Threads per process: %d\n", num_threads);
    printf("Rectangles : %d\n", recs);
    printf("pi is approximately: %f\n", sum);

    // Terminate
    return 0;
}
|
clean.h | /****************************************************************************
* VCGLib o o *
* Visual and Computer Graphics Library o o *
* _ O _ *
* Copyright(C) 2004-2016 \/)\/ *
* Visual Computing Lab /\/| *
* ISTI - Italian National Research Council | *
* \ *
* All rights reserved. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License (http://www.gnu.org/licenses/gpl.txt) *
* for more details. *
* *
****************************************************************************/
#ifndef __VCGLIB_CLEAN
#define __VCGLIB_CLEAN
#include <unordered_set>
// VCG headers
#include <vcg/complex/complex.h>
#include <vcg/complex/algorithms/closest.h>
#include <vcg/space/index/grid_static_ptr.h>
#include <vcg/space/index/spatial_hashing.h>
#include <vcg/complex/algorithms/update/normal.h>
#include <vcg/space/triangle3.h>
#include <vcg/complex/append.h>
namespace vcg {
namespace tri{
/// Iterator that visits, one edge at a time, the edge-connected component
/// containing a given starting edge. Traversal is a DFS over VE adjacency;
/// visited edges are tracked with the mesh's per-edge mark (RequirePerEdgeMark).
template <class ConnectedEdgeMeshType>
class EdgeConnectedComponentIterator
{
public:
typedef ConnectedEdgeMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::EdgeType EdgeType;
typedef typename MeshType::EdgePointer EdgePointer;
typedef typename MeshType::EdgeIterator EdgeIterator;
typedef typename MeshType::ConstEdgeIterator ConstEdgeIterator;
typedef typename MeshType::EdgeContainer EdgeContainer;
public:
// Advance: pop the current edge and push every still-unmarked edge
// incident on either of its two endpoints.
void operator ++()
{
EdgePointer ep = se.top();
se.pop();
for(int i = 0; i < 2; ++i)
{
edge::VEIterator<EdgeType> vei(ep->V(i));
while (!vei.End())
{
if (!tri::IsMarked(*mp, vei.E()))
{
tri::Mark(*mp, vei.E());
se.push(vei.E());
}
++vei;
}
}
}
// Begin a visit of the component of edge e in mesh m.
// Clears all marks, then marks and pushes the seed edge.
void start(MeshType &m, EdgePointer e)
{
tri::RequirePerEdgeMark(m);
mp=&m;
while(!se.empty())
se.pop();
UnMarkAll(m);
tri::Mark(m, e);
se.push(e);
}
// True when the whole component has been visited.
bool completed() {
return se.empty();
}
// Current edge of the visit.
EdgePointer operator *()
{
return se.top();
}
private:
std::stack<EdgePointer> se; // DFS frontier
MeshType *mp;               // mesh being visited
};
/// Iterator that visits, one face at a time, the face-connected component
/// containing a given starting face. Traversal is a DFS over FF adjacency;
/// visited faces are tracked with the mesh's per-face mark (RequirePerFaceMark).
template <class ConnectedMeshType>
class ConnectedComponentIterator
{
public:
typedef ConnectedMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::FaceType FaceType;
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
typedef typename MeshType::FaceContainer FaceContainer;
public:
// Advance: pop the current face and push every still-unmarked neighbor
// reached through a non-border edge.
void operator ++()
{
FacePointer fpt=sf.top();
sf.pop();
for(int j=0; j<fpt->VN(); ++j)
if( !face::IsBorder(*fpt,j) )
{
FacePointer l=fpt->FFp(j);
if( !tri::IsMarked(*mp,l) )
{
tri::Mark(*mp,l);
sf.push(l);
}
}
}
// Begin a visit of the component of face p in mesh m.
// Clears all marks, then marks and pushes the seed face.
void start(MeshType &m, FacePointer p)
{
tri::RequirePerFaceMark(m);
mp=&m;
while(!sf.empty()) sf.pop();
UnMarkAll(m);
tri::Mark(m,p);
sf.push(p);
}
// True when the whole component has been visited.
bool completed() {
return sf.empty();
}
// Current face of the visit.
FacePointer operator *()
{
return sf.top();
}
private:
std::stack<FacePointer> sf; // DFS frontier
MeshType *mp;               // mesh being visited
};
///
/** \addtogroup trimesh */
/*@{*/
/// Class of static functions to clean//restore meshs.
template <class CleanMeshType>
class Clean
{
public:
typedef CleanMeshType MeshType;
typedef typename MeshType::VertexType VertexType;
typedef typename MeshType::VertexPointer VertexPointer;
typedef typename MeshType::ConstVertexPointer ConstVertexPointer;
typedef typename MeshType::VertexIterator VertexIterator;
typedef typename MeshType::ConstVertexIterator ConstVertexIterator;
typedef typename MeshType::EdgeIterator EdgeIterator;
typedef typename MeshType::EdgePointer EdgePointer;
typedef typename MeshType::CoordType CoordType;
typedef typename MeshType::ScalarType ScalarType;
typedef typename MeshType::FaceType FaceType;
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
typedef typename MeshType::ConstFaceIterator ConstFaceIterator;
typedef typename MeshType::FaceContainer FaceContainer;
typedef typename MeshType::TetraType TetraType;
typedef typename MeshType::TetraPointer TetraPointer;
typedef typename MeshType::TetraIterator TetraIterator;
typedef typename MeshType::ConstTetraIterator ConstTetraIterator;
typedef typename vcg::Box3<ScalarType> Box3Type;
typedef GridStaticPtr<FaceType, ScalarType > TriMeshGrid;
/* Comparison class for the duplicate-vertex removal algorithm: orders
   vertices by position, breaking ties by pointer value. */
class RemoveDuplicateVert_Compare{
public:
// Strict weak ordering on vertex pointers: compare by coordinates first,
// falling back to pointer order for coincident vertices so the sort is
// deterministic.
inline bool operator()(VertexPointer const &a, VertexPointer const &b)
{
return ((*a).cP() == (*b).cP()) ? (a<b): ((*a).cP() < (*b).cP());
}
};
/** This function removes all duplicate vertices of the mesh by looking only at their spatial positions.
* Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
* For this reason it is usually performed BEFORE building any topology information.
*/
// Collapse position-coincident vertices: sorts vertex pointers by position,
// deletes every duplicate after the first, and redirects face/edge/tetra
// vertex references to the surviving representative via the map 'mp'.
// Returns the number of deleted vertices.
static int RemoveDuplicateVertex( MeshType & m, bool RemoveDegenerateFlag=true) // V1.0
{
if(m.vert.size()==0 || m.vn==0) return 0;
std::map<VertexPointer, VertexPointer> mp; // duplicate -> representative
size_t i,j;
VertexIterator vi;
int deleted=0;
int k=0;
size_t num_vert = m.vert.size();
std::vector<VertexPointer> perm(num_vert);
for(vi=m.vert.begin(); vi!=m.vert.end(); ++vi, ++k)
perm[k] = &(*vi);
// Sort by position so coincident vertices become adjacent in 'perm'.
RemoveDuplicateVert_Compare c_obj;
std::sort(perm.begin(),perm.end(),c_obj);
j = 0;
i = j;
mp[perm[i]] = perm[j];
++i;
// Scan runs of equal positions: perm[j] is the kept representative,
// every following coincident (and non-deleted) vertex is removed.
for(;i!=num_vert;)
{
if( (! (*perm[i]).IsD()) &&
(! (*perm[j]).IsD()) &&
(*perm[i]).P() == (*perm[j]).cP() )
{
VertexPointer t = perm[i];
mp[perm[i]] = perm[j];
++i;
Allocator<MeshType>::DeleteVertex(m,*t);
deleted++;
}
else
{
j = i;
++i;
}
}
// Redirect all simplex->vertex references to the surviving vertex.
for(FaceIterator fi = m.face.begin(); fi!=m.face.end(); ++fi)
if( !(*fi).IsD() )
for(k = 0; k < (*fi).VN(); ++k)
if( mp.find( (typename MeshType::VertexPointer)(*fi).V(k) ) != mp.end() )
{
(*fi).V(k) = &*mp[ (*fi).V(k) ];
}
for(EdgeIterator ei = m.edge.begin(); ei!=m.edge.end(); ++ei)
if( !(*ei).IsD() )
for(k = 0; k < 2; ++k)
if( mp.find( (typename MeshType::VertexPointer)(*ei).V(k) ) != mp.end() )
{
(*ei).V(k) = &*mp[ (*ei).V(k) ];
}
for (TetraIterator ti = m.tetra.begin(); ti != m.tetra.end(); ++ti)
if (!(*ti).IsD())
for (k = 0; k < 4; ++k)
if (mp.find((typename MeshType::VertexPointer)(*ti).V(k)) != mp.end())
(*ti).V(k) = &*mp[ (*ti).V(k) ];
// Merging vertices can create topologically degenerate simplexes;
// optionally clean them up right away.
if(RemoveDegenerateFlag) RemoveDegenerateFace(m);
if(RemoveDegenerateFlag && m.en>0) {
RemoveDegenerateEdge(m);
RemoveDuplicateEdge(m);
}
return deleted;
}
// Canonical (order-independent) key for an edge: the two vertex indices
// stored sorted, plus the edge pointer, used to detect duplicate edges.
class SortedPair
{
public:
SortedPair() {}
SortedPair(unsigned int v0, unsigned int v1, EdgePointer _fp)
{
v[0]=v0;v[1]=v1;
fp=_fp;
// Normalize so (a,b) and (b,a) compare equal.
if(v[0]>v[1]) std::swap(v[0],v[1]);
}
bool operator < (const SortedPair &p) const
{
return (v[1]!=p.v[1])?(v[1]<p.v[1]):
(v[0]<p.v[0]); }
bool operator == (const SortedPair &s) const
{
if( (v[0]==s.v[0]) && (v[1]==s.v[1]) ) return true;
return false;
}
unsigned int v[2];  // sorted vertex indices
EdgePointer fp;     // edge carrying this pair
};
// Canonical (order-independent) key for a triangle: the three vertex
// indices stored sorted, plus the face pointer, used to detect duplicates.
class SortedTriple
{
public:
SortedTriple() {}
SortedTriple(unsigned int v0, unsigned int v1, unsigned int v2,FacePointer _fp)
{
v[0]=v0;v[1]=v1;v[2]=v2;
fp=_fp;
// Normalize so any permutation of the same vertices compares equal.
std::sort(v,v+3);
}
bool operator < (const SortedTriple &p) const
{
return (v[2]!=p.v[2])?(v[2]<p.v[2]):
(v[1]!=p.v[1])?(v[1]<p.v[1]):
(v[0]<p.v[0]); }
bool operator == (const SortedTriple &s) const
{
if( (v[0]==s.v[0]) && (v[1]==s.v[1]) && (v[2]==s.v[2]) ) return true;
return false;
}
unsigned int v[3];  // sorted vertex indices
FacePointer fp;     // face carrying this triple
};
/** This function removes all duplicate faces of the mesh by looking only at their vertex references.
So it should be called after unification of vertices.
Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
For this reason it is usually performed BEFORE building any topology information.
*/
// Delete faces referencing the same vertex triple (in any order).
// Returns the number of deleted faces.
static int RemoveDuplicateFace( MeshType & m) // V1.0
{
// Build one canonical key per live face, then sort so duplicates
// become adjacent.
std::vector<SortedTriple> fvec;
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
if(!(*fi).IsD())
{
fvec.push_back(SortedTriple( tri::Index(m,(*fi).V(0)),
tri::Index(m,(*fi).V(1)),
tri::Index(m,(*fi).V(2)),
&*fi));
}
std::sort(fvec.begin(),fvec.end());
int total=0;
// For each run of equal keys, delete all but the last occurrence.
for(int i=0;i<int(fvec.size())-1;++i)
{
if(fvec[i]==fvec[i+1])
{
total++;
tri::Allocator<MeshType>::DeleteFace(m, *(fvec[i].fp) );
}
}
return total;
}
/** This function removes all duplicate edges of the mesh by looking only at their vertex references.
So it should be called after unification of vertices.
Note that it does not update any topology relation that could be affected by this like the VT or TT relation.
For this reason it is usually performed BEFORE building any topology information.
*/
// Delete edges referencing the same vertex pair (in either order).
// Returns the number of deleted edges.
static int RemoveDuplicateEdge( MeshType & m) // V1.0
{
if (m.en==0) return 0;
// Build one canonical key per live edge, then sort so duplicates
// become adjacent.
std::vector<SortedPair> eVec;
for(EdgeIterator ei=m.edge.begin();ei!=m.edge.end();++ei)
if(!(*ei).IsD())
{
eVec.push_back(SortedPair( tri::Index(m,(*ei).V(0)), tri::Index(m,(*ei).V(1)), &*ei));
}
std::sort(eVec.begin(),eVec.end());
int total=0;
// For each run of equal keys, delete all but the last occurrence.
for(int i=0;i<int(eVec.size())-1;++i)
{
if(eVec[i]==eVec[i+1])
{
total++;
tri::Allocator<MeshType>::DeleteEdge(m, *(eVec[i].fp) );
}
}
return total;
}
// Count vertices not referenced by any face/edge/tetra, without deleting
// them (delegates to RemoveUnreferencedVertex in counting-only mode).
static int CountUnreferencedVertex( MeshType& m)
{
return RemoveUnreferencedVertex(m,false);
}
/** This function removes vertices that are not referenced by any face or by any edge.
@param m The mesh
@param DeleteVertexFlag if false prevent the vertex deletion and just count it.
@return The number of removed vertices
*/
// Remove (or just count, when DeleteVertexFlag is false) vertices that no
// live face, edge, or tetra references. Returns the number of removed
// (or counted) vertices.
static int RemoveUnreferencedVertex( MeshType& m, bool DeleteVertexFlag=true) // V1.0
{
tri::RequirePerVertexFlags(m);
// referredVec[i] becomes true when vertex i is referenced by any simplex.
std::vector<bool> referredVec(m.vert.size(),false);
int deleted = 0;
for(auto fi = m.face.begin(); fi != m.face.end(); ++fi)
if( !(*fi).IsD() )
for(auto j=0; j < (*fi).VN(); ++j)
referredVec[tri::Index(m, (*fi).V(j))]=true;
for(auto ei=m.edge.begin();ei!=m.edge.end();++ei)
if( !(*ei).IsD() ){
referredVec[tri::Index(m, (*ei).V(0))]=true;
referredVec[tri::Index(m, (*ei).V(1))]=true;
}
for(auto ti=m.tetra.begin(); ti!=m.tetra.end();++ti)
if( !(*ti).IsD() ){
referredVec[tri::Index(m, (*ti).V(0))]=true;
referredVec[tri::Index(m, (*ti).V(1))]=true;
referredVec[tri::Index(m, (*ti).V(2))]=true;
referredVec[tri::Index(m, (*ti).V(3))]=true;
}
// Counting-only mode: report how many live vertices are unreferenced.
if(!DeleteVertexFlag)
return std::count(referredVec.begin(),referredVec.end(),false);
for(auto vi=m.vert.begin();vi!=m.vert.end();++vi)
if( (!(*vi).IsD()) && (!referredVec[tri::Index(m,*vi)]) )
{
Allocator<MeshType>::DeleteVertex(m,*vi);
++deleted;
}
return deleted;
}
/**
Degenerate vertices are vertices that have coords with invalid floating point values,
All the faces incident on deleted vertices are also deleted
*/
// Delete vertices whose coordinates contain NaN, then delete the faces
// incident on any deleted vertex. Returns the number of deleted vertices
// only (the cascaded face-deletion count is computed but not returned).
static int RemoveDegenerateVertex(MeshType& m)
{
VertexIterator vi;
int count_vd = 0;
for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(math::IsNAN( (*vi).P()[0]) ||
math::IsNAN( (*vi).P()[1]) ||
math::IsNAN( (*vi).P()[2]) )
{
count_vd++;
Allocator<MeshType>::DeleteVertex(m,*vi);
}
// Second pass: faces touching a deleted vertex must go too.
FaceIterator fi;
int count_fd = 0;
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD())
if( (*fi).V(0)->IsD() ||
(*fi).V(1)->IsD() ||
(*fi).V(2)->IsD() )
{
count_fd++;
Allocator<MeshType>::DeleteFace(m,*fi);
}
return count_vd;
}
/**
Degenerate faces are faces that are Topologically degenerate,
i.e. have two or more vertex reference that link the same vertex
(and not only two vertexes with the same coordinates).
All Degenerate faces are zero area faces BUT not all zero area faces are degenerate.
We do not take care of topology because when we have degenerate faces the
topology calculation functions crash.
*/
// Delete faces where two or more vertex references point to the SAME
// vertex (topological degeneracy, not merely coincident coordinates).
// Returns the number of deleted faces.
static int RemoveDegenerateFace(MeshType& m)
{
int count_fd = 0;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD())
{
if((*fi).V(0) == (*fi).V(1) ||
(*fi).V(0) == (*fi).V(2) ||
(*fi).V(1) == (*fi).V(2) )
{
count_fd++;
Allocator<MeshType>::DeleteFace(m,*fi);
}
}
return count_fd;
}
// Delete edges whose two vertex references point to the same vertex.
// Returns the number of deleted edges.
static int RemoveDegenerateEdge(MeshType& m)
{
int count_ed = 0;
for(EdgeIterator ei=m.edge.begin(); ei!=m.edge.end();++ei)
if(!(*ei).IsD())
{
if((*ei).V(0) == (*ei).V(1) )
{
count_ed++;
Allocator<MeshType>::DeleteEdge(m,*ei);
}
}
return count_ed;
}
// Remove non-manifold vertices and every face touching them: selects the
// offending vertices, extends the selection to their faces, and deletes
// both. Returns the number of removed vertices.
static int RemoveNonManifoldVertex(MeshType& m)
{
CountNonManifoldVertexFF(m,true); // side effect: selects the bad vertices
tri::UpdateSelection<MeshType>::FaceFromVertexLoose(m);
int count_removed = 0;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi)
if(!(*fi).IsD() && (*fi).IsS())
Allocator<MeshType>::DeleteFace(m,*fi);
for(VertexIterator vi=m.vert.begin(); vi!=m.vert.end();++vi)
if(!(*vi).IsD() && (*vi).IsS()) {
++count_removed;
Allocator<MeshType>::DeleteVertex(m,*vi);
}
return count_removed;
}
// On an edge mesh, duplicate every SELECTED vertex that is referenced by
// more than one edge endpoint, so each extra reference gets its own copy.
// Returns the number of vertices added. Requires a compact mesh.
static int SplitSelectedVertexOnEdgeMesh(MeshType& m)
{
tri::RequireCompactness(m);
// count selected vertices references
std::unordered_map<size_t,size_t> refCount; // selected vertex index -> reference count
size_t countSplit = 0;
for (size_t i=0; i<m.edge.size(); ++i)
{
for (int j=0; j<2; ++j)
{
const VertexPointer vp = m.edge[i].V(j);
if (vp->IsS())
{
// Every reference beyond the first needs one new vertex.
const size_t refs = ++refCount[Index(m, m.edge[i].V(j))];
if (refs > 1) {
countSplit++;
}
}
}
}
// actual split
if (countSplit > 0)
{
auto newVertIt = tri::Allocator<MeshType>::AddVertices(m, countSplit);
for (size_t i=0; i<m.edge.size(); ++i)
{
for (int j=0; j<2; ++j)
{
const VertexPointer vp = m.edge[i].V(j);
const size_t vIdx = Index(m, vp);
if (vp->IsS())
{
// Decrement so the LAST reference keeps the original vertex;
// every earlier one is redirected to a fresh copy.
if (--refCount[vIdx] > 0)
{
newVertIt->ImportData(*vp);
m.edge[i].V(j) = &*(newVertIt++);
}
}
}
}
}
return int(countSplit);
}
// On an edge mesh, select every vertex shared by more than two edges
// (a 1-manifold polyline vertex has at most two incident edges).
static void SelectNonManifoldVertexOnEdgeMesh(MeshType &m)
{
tri::RequireCompactness(m);
tri::UpdateSelection<MeshType>::VertexClear(m);
std::vector<int> cnt(m.vn,0); // incident-edge count per vertex
for(size_t i=0;i<m.edge.size();++i)
{
cnt[tri::Index(m,m.edge[i].V(0))]++;
cnt[tri::Index(m,m.edge[i].V(1))]++;
}
for(size_t i=0;i<m.vert.size();++i)
if(cnt[i]>2) m.vert[i].SetS();
}
// On an edge mesh, select each 2-valence vertex where the polyline bends
// more than AngleRadThr radians away from a straight line.
static void SelectCreaseVertexOnEdgeMesh(MeshType &m, ScalarType AngleRadThr)
{
tri::RequireCompactness(m);
tri::RequireVEAdjacency(m);
tri::UpdateTopology<MeshType>::VertexEdge(m);
tri::UpdateSelection<MeshType>::VertexClear(m);
for(size_t i=0;i<m.vert.size();++i)
{
std::vector<VertexPointer> VVStarVec;
edge::VVStarVE(&(m.vert[i]),VVStarVec);
// Only interior polyline vertices (exactly two neighbors) can be creases.
if(VVStarVec.size()==2)
{
CoordType v0 = m.vert[i].P() - VVStarVec[0]->P();
CoordType v1 = m.vert[i].P() - VVStarVec[1]->P();
// Deviation from collinearity: 0 for a straight segment pair.
float angle = M_PI-vcg::Angle(v0,v1);
if(angle > AngleRadThr) m.vert[i].SetS();
}
}
}
/// Split non manifold vertices of a mesh into separate copies.
// Given a mesh with FF adjacency
// it search for non manifold vertices and duplicate them.
// Duplicated vertices are moved apart according to the move threshold param.
// that is a percentage of the average vector from the non manifold vertex to the barycenter of the incident faces.
// Duplicate each non-manifold vertex once per incident face fan, reassign
// the fan's faces to the copy, and push the copy toward the fan's average
// barycenter by moveThreshold. Returns the number of vertices added.
static int SplitNonManifoldVertex(MeshType& m, ScalarType moveThreshold)
{
RequireFFAdjacency(m);
typedef std::pair<FacePointer,int> FaceInt; // a face and the index of the vertex that we have to change
//
std::vector<std::pair<VertexPointer, std::vector<FaceInt> > >ToSplitVec;
SelectionStack<MeshType> ss(m);
ss.push();
CountNonManifoldVertexFF(m,true); // selects the non-manifold vertices
UpdateFlags<MeshType>::VertexClearV(m);
// First pass: for every selected, not-yet-visited vertex, walk one fan
// of faces around it (FlipE/NextF) and record the (face, vertex-index)
// pairs that will be re-pointed to a new vertex copy.
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for (int i=0; i<fi->VN(); i++)
if ((*fi).V(i)->IsS() && !(*fi).V(i)->IsV())
{
(*fi).V(i)->SetV();
face::Pos<FaceType> startPos(&*fi,i);
face::Pos<FaceType> curPos = startPos;
std::set<FaceInt> faceSet;
do
{
faceSet.insert(std::make_pair(curPos.F(),curPos.VInd()));
curPos.FlipE();
curPos.NextF();
} while (curPos != startPos);
ToSplitVec.push_back(make_pair((*fi).V(i),std::vector<FaceInt>()));
typename std::set<FaceInt>::const_iterator iii;
for(iii=faceSet.begin();iii!=faceSet.end();++iii)
ToSplitVec.back().second.push_back(*iii);
}
}
ss.pop();
// Second step actually add new vertices and split them.
typename tri::Allocator<MeshType>::template PointerUpdater<VertexPointer> pu;
VertexIterator firstVp = tri::Allocator<MeshType>::AddVertices(m,ToSplitVec.size(),pu);
for(size_t i =0;i<ToSplitVec.size();++i)
{
// qDebug("Splitting Vertex %i",ToSplitVec[i].first-&*m.vert.begin());
VertexPointer np=ToSplitVec[i].first;
pu.Update(np); // AddVertices may have reallocated the container
firstVp->ImportData(*np);
// loop on the face to be changed, and also compute the movement vector;
CoordType delta(0,0,0);
for(size_t j=0;j<ToSplitVec[i].second.size();++j)
{
FaceInt ff=ToSplitVec[i].second[j];
ff.first->V(ff.second)=&*firstVp;
delta+=Barycenter(*(ff.first))-np->cP();
}
delta /= ToSplitVec[i].second.size();
firstVp->P() = firstVp->P() + delta * moveThreshold;
firstVp++;
}
return int(ToSplitVec.size());
}
/// \brief Split the mesh into its manifold-connected components, duplicating the vertices shared between components and moving them apart by moveThreshold.
// Flood-fill faces across manifold edges only, append each resulting
// component to a temporary mesh (duplicating shared vertices), optionally
// push duplicated border vertices apart by moveThreshold, then copy the
// result back into m. Returns the number of components found.
static size_t SplitManifoldComponents(MeshType &m, const ScalarType moveThreshold = 0)
{
typedef typename MeshType::FacePointer FacePointer;
typedef typename MeshType::FaceIterator FaceIterator;
// it also assumes that the FF adjacency is well computed.
RequireFFAdjacency(m);
UpdateFlags<MeshType>::FaceClearV(m);
UpdateFlags<MeshType>::FaceClearS(m);
MeshType tmpMesh;
tmpMesh.vert.EnableVFAdjacency();
tmpMesh.face.EnableVFAdjacency();
if (m.face.IsWedgeTexCoordEnabled())
tmpMesh.face.EnableWedgeTexCoord();
size_t selCnt=0;
for(FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if( !(*fi).IsD() && !(*fi).IsV() && !(*fi).IsS())
{
// Select exactly one component (BFS crossing only manifold edges) ...
UpdateFlags<MeshType>::FaceClearS(m);
std::deque<FacePointer> visitStack;
visitStack.push_back(&*fi);
(*fi).SetS();
(*fi).SetV();
while(!visitStack.empty())
{
FacePointer fp = visitStack.front();
visitStack.pop_front();
for(int i=0;i<fp->VN();++i) {
FacePointer ff = fp->FFp(i);
if(face::IsManifold(*fp, i) && !ff->IsS() && !ff->IsV())
{
ff->SetS();
ff->SetV();
visitStack.push_back(ff);
}
}
}
// ... then append the selection (true => selected-only) to tmpMesh.
Append<MeshType, MeshType>::Mesh(tmpMesh, m, true);
++selCnt;
}
vcg::tri::UpdateTopology<MeshType>::VertexFace(tmpMesh);
vcg::tri::UpdateFlags<MeshType>::VertexBorderFromNone(tmpMesh);
// Move each border vertex toward the average barycenter of its faces,
// separating the duplicated copies of formerly shared vertices.
for (size_t i = 0; i < size_t(tmpMesh.VN()); ++i)
{
VertexType & v = tmpMesh.vert[i];
if (v.IsB())
{
std::vector<FacePointer> faceVec;
std::vector<int> idxVec;
vcg::face::VFStarVF(&v, faceVec, idxVec);
CoordType delta(0, 0, 0);
for (auto fp : faceVec)
{
delta += vcg::Barycenter(*fp) - v.cP();
}
delta /= faceVec.size();
v.P() += delta * moveThreshold;
}
}
UpdateSelection<MeshType>::Clear(tmpMesh);
Append<MeshType, MeshType>::MeshCopy(m, tmpMesh);
return selCnt;
}
// Auxiliary comparator for sorting non-manifold face pointers by area
// (smallest first). Used in RemoveNonManifoldFace so the smallest faces
// are removed first.
struct CompareAreaFP {
bool operator ()(FacePointer const& f1, FacePointer const& f2) const {
return DoubleArea(*f1) < DoubleArea(*f2);
}
};
/// Removal of faces that were incident on a non manifold edge.
/// Candidates are processed smallest-area first; each face is re-checked
/// before deletion because removing a neighbor may have made its edges
/// manifold again. Returns the number of deleted faces.
static int RemoveNonManifoldFace(MeshType& m)
{
FaceIterator fi;
int count_fd = 0;
std::vector<FacePointer> ToDelVec;
for(fi=m.face.begin(); fi!=m.face.end();++fi)
if (!fi->IsD())
{
if ((!IsManifold(*fi,0))||
(!IsManifold(*fi,1))||
(!IsManifold(*fi,2)))
ToDelVec.push_back(&*fi);
}
std::sort(ToDelVec.begin(),ToDelVec.end(),CompareAreaFP());
for(size_t i=0;i<ToDelVec.size();++i)
{
if(!ToDelVec[i]->IsD())
{
FaceType &ff= *ToDelVec[i];
// Re-check: earlier deletions may have fixed this face's edges.
if ((!IsManifold(ff,0))||
(!IsManifold(ff,1))||
(!IsManifold(ff,2)))
{
// Detach from FF adjacency before deleting to keep topology valid.
for(int j=0;j<3;++j)
if(!face::IsBorder<FaceType>(ff,j))
vcg::face::FFDetach<FaceType>(ff,j);
Allocator<MeshType>::DeleteFace(m,ff);
count_fd++;
}
}
}
return count_fd;
}
/* Remove the faces whose area falls outside [MinAreaThr, MaxAreaThr]
   (inclusive at the boundaries). Optionally restricted to selected faces.
   Returns the number of deleted faces. */
static int RemoveFaceOutOfRangeArea(MeshType& m, ScalarType MinAreaThr=0, ScalarType MaxAreaThr=(std::numeric_limits<ScalarType>::max)(), bool OnlyOnSelected=false)
{
int count_fd = 0;
// DoubleArea returns twice the area, so double the thresholds once
// instead of halving every face's value.
MinAreaThr*=2;
MaxAreaThr*=2;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi){
if(!(*fi).IsD())
if(!OnlyOnSelected || (*fi).IsS())
{
const ScalarType doubleArea=DoubleArea<FaceType>(*fi);
if((doubleArea<=MinAreaThr) || (doubleArea>=MaxAreaThr) )
{
Allocator<MeshType>::DeleteFace(m,*fi);
count_fd++;
}
}
}
return count_fd;
}
static int RemoveZeroAreaFace(MeshType& m) { return RemoveFaceOutOfRangeArea(m,0);}
/**
* Is the mesh only composed by quadrilaterals?
* True when every live face has exactly one faux edge (each quad is stored
* as two triangles sharing one faux diagonal).
*/
static bool IsBitQuadOnly(const MeshType &m)
{
typedef typename MeshType::FaceType F;
tri::RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
unsigned int tmp = fi->Flags()&(F::FAUX0|F::FAUX1|F::FAUX2);
// Exactly one faux bit set <=> this triangle is half of a quad.
if ( tmp != F::FAUX0 && tmp != F::FAUX1 && tmp != F::FAUX2) return false;
}
return true;
}
// Check that every faux-edge flag is mirrored by the adjacent face:
// for each edge, fi->IsF(z) must equal the flag on the shared edge of
// the FF-neighbor. Requires FF adjacency.
static bool IsFaceFauxConsistent(MeshType &m)
{
RequirePerFaceFlags(m);
RequireFFAdjacency(m);
for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
{
for(int z=0;z<(*fi).VN();++z)
{
FacePointer fp = fi->FFp(z);
int zp = fi->FFi(z);
if(fi->IsF(z) != fp->IsF(zp)) return false;
}
}
return true;
}
/**
* Is the mesh only composed by triangles? (non polygonal faces)
* True when no live face carries any faux-edge bit.
*/
static bool IsBitTriOnly(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) {
if ( !fi->IsD() && fi->IsAnyF() ) return false;
}
return true;
}
// True when the mesh contains at least one faux edge (i.e. polygons).
static bool IsBitPolygonal(const MeshType &m){
return !IsBitTriOnly(m);
}
/**
* Is the mesh only composed by quadrilaterals and triangles? (no pentas, etc)
* It assumes that the bits are consistent. In that case there can be only a single faux edge.
*/
static bool IsBitTriQuadOnly(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
typedef typename MeshType::FaceType F;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
// Zero faux bits (triangle) or exactly one (quad half) are allowed.
if ( tmp!=F::FAUX0 && tmp!=F::FAUX1 && tmp!=F::FAUX2 && tmp!=0 ) return false;
}
return true;
}
/**
* How many quadrilaterals?
* It assumes that the bits are consistent. In that case we count the tris with a single faux edge and divide by two.
*/
static int CountBitQuads(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
typedef typename MeshType::FaceType F;
int count=0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
unsigned int tmp = fi->cFlags()&(F::FAUX0|F::FAUX1|F::FAUX2);
if ( tmp==F::FAUX0 || tmp==F::FAUX1 || tmp==F::FAUX2) count++;
}
// Each quad is represented by two triangles, hence the halving.
return count / 2;
}
/**
* How many triangles? (non polygonal faces)
* Counts the live faces that carry no faux-edge bit at all.
*/
static int CountBitTris(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
int count=0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
if (!(fi->IsAnyF())) count++;
}
return count;
}
/**
* How many polygons of any kind? (including triangles)
* it assumes that there are no faux vertexes (e.g vertices completely surrounded by faux edges)
* Each faux edge merges two triangles, removing one polygon; faux edges
* are shared, so every one is counted twice below.
*/
static int CountBitPolygons(const MeshType &m)
{
tri::RequirePerFaceFlags(m);
int count = 0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD()) {
if (fi->IsF(0)) count++;
if (fi->IsF(1)) count++;
if (fi->IsF(2)) count++;
}
return m.fn - count/2;
}
/**
* The number of polygonal faces is
* FN - EN_f (each faux edge hides exactly one triangular face or in other words a polygon of n edges has n-3 faux edges.)
* In the general case the number of polygonal faces is
* FN - EN_f + VN_f
* where:
* EN_f is the number of faux edges.
* VN_f is the number of faux vertices (e.g vertices completely surrounded by faux edges)
* as an intuitive proof think to an internal vertex that is collapsed onto a border of a polygon:
* it deletes 2 faces, 1 faux edge and 1 vertex so to keep the balance you have to add back the removed vertex.
*/
static int CountBitLargePolygons(const MeshType &m)
{
//note - using unordered_map to set visited vertices because
//the mesh is const (before, the function used vertex flags...).
//could be used std::vector<bool> if the vertex has the Index()
//member function...
std::unordered_map<ConstVertexPointer, bool> vertVisited;
for (ConstVertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
if (!vi->IsD()) vertVisited[&(*vi)] = true;
// First loop Clear all referenced vertices
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if (!fi->IsD())
for(int i=0;i<3;++i){
vertVisited[fi->V(i)] = false;
}
// Second Loop, count (twice) faux edges and mark all vertices touched by non faux edges
// (e.g vertexes on the boundary of a polygon)
int countE = 0;
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if (!fi->IsD()) {
for(int i=0;i<3;++i)
{
if (fi->IsF(i))
countE++;
else
{
vertVisited[fi->V0(i)] = true;
vertVisited[fi->V1(i)] = true;
}
}
}
// Third Loop, count the number of referenced vertexes that are completely surrounded by faux edges.
int countV = 0;
for (ConstVertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
if (!vi->IsD() && !(vertVisited[&(*vi)])) countV++;
// FN - EN_f + VN_f (countE counted each shared faux edge twice).
return m.fn - countE/2 + countV ;
}
/**
* Checks that the mesh has consistent per-face faux edges
* (the ones that merge triangles into larger polygons).
* A border edge should never be faux, and faux edges should always be
* reciprocated by another faux edge on the adjacent face.
* It requires FF adjacency.
*/
static bool HasConsistentPerFaceFauxFlag(const MeshType &m)
{
RequireFFAdjacency(m);
RequirePerFaceFlags(m);
for (ConstFaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
if(!(*fi).IsD())
for (int k=0; k<3; k++)
if( ( fi->IsF(k) != fi->cFFp(k)->IsF(fi->cFFi(k)) ) ||
( fi->IsF(k) && face::IsBorder(*fi,k)) )
{
return false;
}
return true;
}
/**
* Count the number of non manifold vertices in a polyline (edge) mesh,
* i.e. the vertices shared by more than 2 incident edges.
* Optionally selects the offending vertices.
*/
static int CountNonManifoldEdgeEE( MeshType & m, bool SelectFlag=false)
{
MeshAssert<MeshType>::OnlyEdgeMesh(m);
RequireEEAdjacency(m);
tri::UpdateTopology<MeshType>::EdgeEdge(m);
if(SelectFlag) UpdateSelection<MeshType>::VertexClear(m);
int nonManifoldCnt=0;
SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);
// First Loop, just count how many edges are incident on a vertex and store it in the TemporaryData Counter.
EdgeIterator ei;
for (ei = m.edge.begin(); ei != m.edge.end(); ++ei) if (!ei->IsD())
{
TD[(*ei).V(0)]++;
TD[(*ei).V(1)]++;
}
tri::UpdateFlags<MeshType>::VertexClearV(m);
// Second Loop, Check that each vertex have been seen 1 or 2 times.
for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if (!vi->IsD())
{
if( TD[vi] >2 )
{
if(SelectFlag) (*vi).SetS();
nonManifoldCnt++;
}
}
return nonManifoldCnt;
}
/**
* Count the number of non manifold edges in a mesh, e.g. the edges where there are more than 2 incident faces.
*
* Note that this test is not enough to say that a mesh is two manifold,
* you have to count also the non manifold vertexes.
* Each non-manifold edge is counted once: after counting, the full ring
* of faces around it is marked with a per-wedge user bit so the other
* faces sharing the edge are skipped.
*/
static int CountNonManifoldEdgeFF( MeshType & m, bool SelectFlag=false)
{
RequireFFAdjacency(m);
int nmfBit[3];
nmfBit[0]= FaceType::NewBitFlag();
nmfBit[1]= FaceType::NewBitFlag();
nmfBit[2]= FaceType::NewBitFlag();
UpdateFlags<MeshType>::FaceClear(m,nmfBit[0]+nmfBit[1]+nmfBit[2]);
if(SelectFlag){
UpdateSelection<MeshType>::VertexClear(m);
UpdateSelection<MeshType>::FaceClear(m);
}
int edgeCnt = 0;
for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
{
if (!fi->IsD())
{
for(int i=0;i<3;++i)
if(!IsManifold(*fi,i))
{
// Only count the edge if no face of its ring marked it yet.
if(!(*fi).IsUserBit(nmfBit[i]))
{
++edgeCnt;
if(SelectFlag)
{
(*fi).V0(i)->SetS();
(*fi).V1(i)->SetS();
}
// follow the ring of faces incident on edge i;
face::Pos<FaceType> nmf(&*fi,i);
do
{
if(SelectFlag) nmf.F()->SetS();
nmf.F()->SetUserBit(nmfBit[nmf.E()]);
nmf.NextF();
}
while(nmf.f != &*fi);
}
}
}
}
return edgeCnt;
}
/** Count (and eventually select) non 2-Manifold vertexes of a mesh
* e.g. the vertices with a non 2-manif. neighbourhood but that do not belong to not 2-manif edges.
* typical situation two cones connected by one vertex.
* Strategy: count incident faces per vertex, exclude vertices lying on
* non-manifold edges, then compare the FF-reachable star size with the
* counted incidence — a mismatch means a pinched (non-manifold) vertex.
*/
static int CountNonManifoldVertexFF( MeshType & m, bool selectVert = true, bool clearSelection = true)
{
RequireFFAdjacency(m);
if(selectVert && clearSelection) UpdateSelection<MeshType>::VertexClear(m);
int nonManifoldCnt=0;
SimpleTempData<typename MeshType::VertContainer, int > TD(m.vert,0);
// First Loop, just count how many faces are incident on a vertex and store it in the TemporaryData Counter.
FaceIterator fi;
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for (int k=0; k<fi->VN(); k++)
{
TD[(*fi).V(k)]++;
}
}
tri::UpdateFlags<MeshType>::VertexClearV(m);
// Second Loop.
// mark out of the game the vertexes that are incident on non manifold edges.
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0; i<fi->VN(); ++i)
if (!IsManifold(*fi,i))
{
(*fi).V0(i)->SetV();
(*fi).V1(i)->SetV();
}
}
// Third Loop, for safe vertexes, check that the number of faces that you can reach starting
// from it and using FF is the same of the previously counted.
for (fi = m.face.begin(); fi != m.face.end(); ++fi) if (!fi->IsD())
{
for(int i=0; i<fi->VN(); i++) if (!(*fi).V(i)->IsV())
{
(*fi).V(i)->SetV();
face::Pos<FaceType> pos(&(*fi),i);
int starSizeFF = pos.NumberOfIncidentFaces();
if (starSizeFF != TD[(*fi).V(i)])
{
if (selectVert)
(*fi).V(i)->SetS();
nonManifoldCnt++;
}
}
}
return nonManifoldCnt;
}
/// Very simple test of water tightness. No boundary and no non manifold edges.
/// Assume that it is orientable.
/// It could be debated if a closed non orientable surface is watertight or not.
///
/// The rationale of not testing orientability here is that
/// it requires FFAdj while this test do not require any adjacency.
///
static bool IsWaterTight(MeshType & m)
{
int edgeNum=0,edgeBorderNum=0,edgeNonManifNum=0;
CountEdgeNum(m, edgeNum, edgeBorderNum,edgeNonManifNum);
// Watertight <=> every edge is shared by exactly two faces.
return (edgeBorderNum==0) && (edgeNonManifNum==0);
}
// Count the distinct edges of the mesh, plus how many of them are border
// (1 incident face) and non-manifold (>2 incident faces). Works by
// sorting all per-face edge records and scanning runs of equal edges.
static void CountEdgeNum( MeshType & m, int &total_e, int &boundary_e, int &non_manif_e )
{
std::vector< typename tri::UpdateTopology<MeshType>::PEdge > edgeVec;
tri::UpdateTopology<MeshType>::FillEdgeVector(m,edgeVec,true);
sort(edgeVec.begin(), edgeVec.end()); // sort by vertex pair so equal edges are adjacent
total_e=0;
boundary_e=0;
non_manif_e=0;
size_t f_on_cur_edge =1;
for(size_t i=0;i<edgeVec.size();++i)
{
// End of a run of identical edges: classify by incident-face count.
if(( (i+1) == edgeVec.size()) || !(edgeVec[i] == edgeVec[i+1]))
{
++total_e;
if(f_on_cur_edge==1)
++boundary_e;
if(f_on_cur_edge>2)
++non_manif_e;
f_on_cur_edge=1;
}
else
{
++f_on_cur_edge;
}
} // end for
}
// Count the border loops (holes) of the mesh by walking each boundary
// with Pos::NextB and marking visited faces so every loop is counted once.
// Requires FF adjacency (uses face::IsBorder / NextB).
static int CountHoles( MeshType & m)
{
UpdateFlags<MeshType>::FaceClearV(m);
int loopNum=0;
for(FaceIterator fi=m.face.begin(); fi!=m.face.end();++fi) if(!fi->IsD())
{
for(int j=0;j<3;++j)
{
if(!fi->IsV() && face::IsBorder(*fi,j))
{
// Walk the whole border loop, marking the faces it touches.
face::Pos<FaceType> startPos(&*fi,j);
face::Pos<FaceType> curPos=startPos;
do
{
curPos.NextB();
curPos.F()->SetV();
}
while(curPos!=startPos);
++loopNum;
}
}
}
return loopNum;
}
/*
Compute the set of connected components of a given mesh:
it fills a vector of pairs < int, faceptr > with, for each connected component, its size and a representative face.
*/
// Return only the number of face-connected components, discarding the
// per-component (size, representative) data.
static int CountConnectedComponents(MeshType &m)
{
std::vector< std::pair<int,FacePointer> > CCV;
return ConnectedComponents(m,CCV);
}
/**
Compute the connected components of the mesh through the FF adjacency.
Fills CCV with one (size, representative face) pair per component and returns
the number of components. Side effect: clobbers the face V flags.
*/
static int ConnectedComponents(MeshType &m, std::vector< std::pair<int,FacePointer> > &CCV)
{
  tri::RequireFFAdjacency(m);
  CCV.clear();
  tri::UpdateFlags<MeshType>::FaceClearV(m);
  std::stack<FacePointer> sf;
  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()) && !(*fi).IsV())
    {
      // fi seeds a new component: flood-fill it through FF adjacency.
      (*fi).SetV();
      CCV.push_back(std::make_pair(0,&*fi));
      sf.push(&*fi);
      while (!sf.empty())
      {
        // BUG FIX: fpt used to be initialized with &*(m.face.begin()) before the
        // outer loop, which dereferences begin() even when the mesh has no faces
        // (undefined behavior on an empty mesh) and was a dead store anyway.
        FacePointer fpt = sf.top();
        ++CCV.back().first;   // one more face belongs to the current component
        sf.pop();
        for(int j=0; j<fpt->VN(); ++j)
        {
          if( !face::IsBorder(*fpt,j) )
          {
            FacePointer l = fpt->FFp(j);
            if( !(*l).IsV() )
            {
              (*l).SetV();
              sf.push(l);
            }
          }
        }
      }
    }
  }
  return int(CCV.size());
}
/// Compute the connected components of an edge mesh (edges, not faces).
/// Fills eCC with one (size, representative edge) pair per component and
/// returns the number of components.
/// Side effects: rebuilds VE topology and clobbers the edge V flags.
static int edgeMeshConnectedComponents(MeshType & poly, std::vector<std::pair<int, typename MeshType::EdgePointer> > &eCC)
{
  typedef typename MeshType::EdgePointer EdgePointer;
  tri::UpdateTopology<MeshType>::VertexEdge(poly);  // needed by VEIterator below
  tri::UpdateFlags<MeshType>::EdgeClear(poly);
  eCC.clear();
  std::stack<EdgePointer> stack;
  // Flood fill: every unvisited, non-deleted edge seeds a new component.
  for (auto ei = poly.edge.begin(); ei != poly.edge.end(); ++ei)
    if (!ei->IsD() && !ei->IsV())
    {
      ei->SetV();
      std::pair<int, EdgePointer> cc(1, &*ei);
      stack.push(&*ei);
      while (!stack.empty())
      {
        EdgePointer ep = stack.top();
        stack.pop();
        // Visit every edge incident on either endpoint of ep.
        for (int i = 0; i < 2; ++i)
        {
          edge::VEIterator<typename MeshType::EdgeType> vei(ep->V(i));
          while (!vei.End())
          {
            if (!vei.E()->IsV())
            {
              vei.E()->SetV();
              stack.push(vei.E());
              cc.first += 1;   // grow the current component
            }
            ++vei;
          }
        }
      }
      eCC.push_back(cc);
    }
  return int(eCC.size());
}
/// Fill the per-vertex handle h with the valence of each vertex, counted as the
/// number of times a non-deleted face references it.
/// Deleted vertices are not skipped: their entry stays 0 unless some face still
/// references them.
static void ComputeValence( MeshType &m, typename MeshType::PerVertexIntHandle &h)
{
  for(VertexIterator vi=m.vert.begin(); vi!= m.vert.end();++vi)
    h[vi]=0;   // reset every entry (the handle accepts an iterator as index)
  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi)
  {
    if(!((*fi).IsD()))
      for(int j=0;j<fi->VN();j++)   // VN() so polygonal faces are handled too
        ++h[tri::Index(m,fi->V(j))];
  }
}
/**
GENUS.
A topologically invariant property of a surface defined as
the largest number of non-intersecting simple closed curves that can be
drawn on the surface without separating it.
Roughly speaking, it is the number of holes in a surface.
The genus g of a closed surface, also called the geometric genus, is related to the
Euler characteristic by the relation $chi$ by $chi==2-2g$.
The genus of a connected, orientable surface is an integer representing the maximum
number of cuttings along closed simple curves without rendering the resultant
manifold disconnected. It is equal to the number of handles on it.
For general polyhedra the <em>Euler Formula</em> is:
V - E + F = 2 - 2G - B
where V is the number of vertices, F is the number of faces, E is the
number of edges, G is the genus and B is the number of <em>boundary polygons</em>.
The above formula is valid for a mesh with one single connected component.
By considering multiple connected components the formula becomes:
V - E + F = 2C - 2Gs - B -> 2Gs = - ( V-E+F +B -2C)
where C is the number of connected components and Gs is the sum of
the genus of all connected components.
Note that in the case of a mesh with boundaries the meaning of Genus is less intuitive than it might seem.
A closed sphere, a sphere with one hole (e.g. a disk) and a sphere with two holes (e.g. a tube) all of them have Genus == 0
*/
// Genus from the generalized Euler formula:
//   V - E + F = 2C - 2G - B   =>   G = -(V - E + F + B - 2C) / 2
// where B is the number of boundary loops and C the number of components.
static int MeshGenus(int nvert, int nedges, int nfaces, int numholes, int numcomponents)
{
  const int eulerSum = nvert - nedges + nfaces + numholes - 2 * numcomponents;
  return -(eulerSum / 2);
}
// Compute the genus of the mesh by gathering the counts required by the
// Euler-formula overload above.
static int MeshGenus(MeshType &m)
{
  int totalE = 0, borderE = 0, nonManifE = 0;
  CountEdgeNum(m, totalE, borderE, nonManifE);
  const int holeNum = CountHoles(m);
  const int compNum = CountConnectedComponents(m);
  return MeshGenus(m.vn, totalE, m.fn, holeNum, compNum);
}
/**
* Check if the given mesh is regular, semi-regular or irregular.
*
* Each vertex of a \em regular mesh has valence 6 except for border vertices
* which have valence 4.
*
* A \em semi-regular mesh is derived from an irregular one applying
* 1-to-4 subdivision recursively. (not checked for now)
*
* All other meshes are \em irregular.
*/
/**
 * Check if the given mesh is regular: every interior vertex has valence 6 and
 * every border vertex has valence 4.
 * Semi-regularity (a mesh obtained by recursive 1-to-4 subdivision) is not
 * checked yet, so Semiregular is always set to false.
 */
static void IsRegularMesh(MeshType &m, bool &Regular, bool &Semiregular)
{
  RequireVFAdjacency(m);
  Regular = true;
  // For each vertex, walk the one-ring with a Pos and count the incident edges.
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
  {
    if (vi->IsD())
      continue;
    face::Pos<FaceType> he((*vi).VFp(), &*vi);
    face::Pos<FaceType> ht = he;
    int n = 0;
    bool border = false;
    do
    {
      ++n;
      ht.NextE();
      if (ht.IsBorder())
        border = true;
    }
    while (ht != he);
    // On the border the traversal touches each edge twice, hence the halving.
    if (border)
      n = n / 2;
    // BUG FIX: the old test '(n != 6) && (!border && n != 4)' could never flag
    // a border vertex (the second operand is false when border is true) and
    // wrongly accepted interior vertices of valence 4. The documented criterion
    // is: interior valence == 6, border valence == 4.
    if ((!border && n != 6) || (border && n != 4))
    {
      Regular = false;
      break;
    }
  }
  // For now we do not account for semi-regularity.
  Semiregular = false;
}
// Return true when every pair of adjacent faces agrees on orientation,
// i.e. each shared edge is traversed in opposite directions by its two faces.
static bool IsCoherentlyOrientedMesh(MeshType &m)
{
  RequireFFAdjacency(m);
  MeshAssert<MeshType>::FFAdjacencyIsInitialized(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    for (int e = 0; e < 3; ++e)
      if (!face::CheckOrientation(*fi, e))
        return false;
  }
  return true;
}
/// Try to orient all faces coherently, flipping faces where needed.
/// On exit _IsOriented is true iff no face had to be flipped, and _IsOrientable
/// is true iff a coherent orientation exists (it fails e.g. on a Moebius strip).
/// Propagation is a flood fill over FF adjacency; clobbers the face V flags.
static void OrientCoherentlyMesh(MeshType &m, bool &_IsOriented, bool &_IsOrientable)
{
  RequireFFAdjacency(m);
  MeshAssert<MeshType>::FFAdjacencyIsInitialized(m);
  bool IsOrientable = true;
  bool IsOriented = true;
  UpdateFlags<MeshType>::FaceClearV(m);
  std::stack<FacePointer> faces;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (!fi->IsD() && !fi->IsV())
    {
      // each face put in the stack is selected (and oriented)
      fi->SetV();
      faces.push(&(*fi));
      while (!faces.empty())
      {
        FacePointer fp = faces.top();
        faces.pop();
        // make consistently oriented the adjacent faces
        for (int j = 0; j < 3; j++)
        {
          // Orientation only propagates across interior, manifold edges:
          // nothing to do across borders, undefined across non-manifold edges.
          if (!face::IsBorder(*fp,j) && face::IsManifold<FaceType>(*fp, j))
          {
            FacePointer fpaux = fp->FFp(j);
            int iaux = fp->FFi(j);
            if (!CheckOrientation(*fpaux, iaux))
            {
              IsOriented = false;
              // An unvisited neighbour can still be flipped to agree with fp...
              if (!fpaux->IsV())
                face::SwapEdge<FaceType,true>(*fpaux, iaux);
              else
              {
                // ...but a visited one was already fixed: not orientable.
                // Note: this break leaves the while loop to drain the stack
                // before the outer check below stops the whole scan.
                IsOrientable = false;
                break;
              }
            }
            if (!fpaux->IsV())
            {
              fpaux->SetV();
              faces.push(fpaux);
            }
          }
        }
      }
    }
    if (!IsOrientable) break;
  }
  _IsOriented = IsOriented;
  _IsOrientable = IsOrientable;
}
/// Flip the orientation of the whole mesh flipping all the faces (by swapping the first two vertices)
// Flip the orientation of every (optionally: every selected) face by swapping
// its first two vertices; wedge tex coords are swapped accordingly when present.
static void FlipMesh(MeshType &m, bool selected=false)
{
  const bool hasWT = HasPerWedgeTexCoord(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    if (selected && !fi->IsS())
      continue;
    face::SwapEdge<FaceType,false>(*fi, 0);
    if (hasWT)
      std::swap(fi->WT(0), fi->WT(1));
  }
}
/// Flip a mesh so that its normals are oriented outside.
/// Just for safety it uses a voting scheme over several directions.
/// It assumes that:
/// - the mesh already has coherent normals;
/// - the mesh is watertight and made of a single component.
/// Flip the whole mesh when a voting scheme decides its normals point inward.
/// For each of 7 fixed directions it finds the extreme (min/max) vertices; on
/// an outward-oriented mesh the normal of the maximum vertex along d points
/// roughly along d (and the minimum one along -d). Each violation is a vote
/// for "normals point inward"; with enough votes the mesh is flipped.
/// Returns true iff the mesh was flipped. Recomputes per-vertex normals.
static bool FlipNormalOutside(MeshType &m)
{
  if(m.vert.empty()) return false;
  tri::UpdateNormal<MeshType>::PerVertexAngleWeighted(m);
  tri::UpdateNormal<MeshType>::NormalizePerVertex(m);
  std::vector< VertexPointer > minVertVec;
  std::vector< VertexPointer > maxVertVec;
  // The set of directions to be chosen
  std::vector< CoordType > dirVec;
  dirVec.push_back(CoordType(1,0,0));
  dirVec.push_back(CoordType(0,1,0));
  dirVec.push_back(CoordType(0,0,1));
  dirVec.push_back(CoordType( 1, 1,1));
  dirVec.push_back(CoordType(-1, 1,1));
  dirVec.push_back(CoordType(-1,-1,1));
  dirVec.push_back(CoordType( 1,-1,1));
  for(size_t i=0;i<dirVec.size();++i)
  {
    Normalize(dirVec[i]);
    // Seed every extreme with the first vertex (the mesh is non-empty here).
    minVertVec.push_back(&*m.vert.begin());
    maxVertVec.push_back(&*m.vert.begin());
  }
  // One pass over the vertices finds the extreme vertex for every direction.
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi) if(!(*vi).IsD())
  {
    for(size_t i=0;i<dirVec.size();++i)
    {
      if( (*vi).cP().dot(dirVec[i]) < minVertVec[i]->P().dot(dirVec[i])) minVertVec[i] = &*vi;
      if( (*vi).cP().dot(dirVec[i]) > maxVertVec[i]->P().dot(dirVec[i])) maxVertVec[i] = &*vi;
    }
  }
  int voteCount=0;
  // Near-tangent normals (within 85 degrees of the plane) do not vote.
  ScalarType angleThreshold = cos(math::ToRad(85.0));
  for(size_t i=0;i<dirVec.size();++i)
  {
    // qDebug("Min vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],minVertVec[i]->P()[0],minVertVec[i]->P()[1],minVertVec[i]->P()[2]);
    // qDebug("Max vert along (%f %f %f) is %f %f %f",dirVec[i][0],dirVec[i][1],dirVec[i][2],maxVertVec[i]->P()[0],maxVertVec[i]->P()[1],maxVertVec[i]->P()[2]);
    // A min vertex whose normal points along +dir (or a max vertex whose normal
    // points along -dir) suggests the normals are inward.
    if(minVertVec[i]->N().dot(dirVec[i]) > angleThreshold ) voteCount++;
    if(maxVertVec[i]->N().dot(dirVec[i]) < -angleThreshold ) voteCount++;
  }
  // qDebug("votecount = %i",voteCount);
  if(voteCount < int(dirVec.size())/2) return false;
  FlipMesh(m);
  return true;
}
// Search and remove small single triangle folds
// - a face has normal opposite to all other faces
// - choose the edge that brings to the face f1 containing the vertex opposite to that edge.
// Search and remove small single-triangle folds by edge flipping.
// A fold is a face whose normal is nearly opposite (angle > normalThresholdDeg)
// to the normals of ALL three of its FF neighbours.
// Returns the total number of flips performed; iterates until convergence when
// repeat is true. Side effects: rebuilds FF topology, clears marks, and sets
// the S/V flags on the involved faces.
static int RemoveFaceFoldByFlip(MeshType &m, float normalThresholdDeg=175, bool repeat=true)
{
  RequireFFAdjacency(m);
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;
  do {
    tri::UpdateTopology<MeshType>::FaceFace(m);
    tri::UnMarkAll(m);
    count = 0;
    ScalarType NormalThrRad = math::ToRad(normalThresholdDeg);
    ScalarType eps = ScalarType(0.0001); // this epsilon value is in absolute value. It is a distance from edge in baricentric coords.
    //detection stage
    for(FaceIterator fi=m.face.begin();fi!= m.face.end();++fi ) if(!(*fi).IsV())
    { Point3<ScalarType> NN = vcg::TriangleNormal((*fi)).Normalize();
      // A fold face has its normal nearly opposite to all three neighbours.
      if( vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(0)).Normalize()) > NormalThrRad &&
          vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(1)).Normalize()) > NormalThrRad &&
          vcg::AngleN(NN,TriangleNormal(*(*fi).FFp(2)).Normalize()) > NormalThrRad )
      {
        (*fi).SetS();
        //(*fi).C()=Color4b(Color4b::Red);
        // now search the best edge to flip
        for(int i=0;i<3;i++)
        {
          // p is the vertex opposite edge i; flip edge i only when p projects
          // strictly inside the neighbouring face (barycentric coords > eps).
          Point3<ScalarType> &p=(*fi).P2(i);
          Point3<ScalarType> L;
          bool ret = vcg::InterpolationParameters((*(*fi).FFp(i)),TriangleNormal(*(*fi).FFp(i)),p,L);
          if(ret && L[0]>eps && L[1]>eps && L[2]>eps)
          {
            (*fi).FFp(i)->SetS();
            (*fi).FFp(i)->SetV();
            //(*fi).FFp(i)->C()=Color4b(Color4b::Green);
            if(face::CheckFlipEdge<FaceType>( *fi, i )) {
              face::FlipEdge<FaceType>( *fi, i );
              ++count; ++total;
            }
          }
        }
      }
    }
    // tri::UpdateNormal<MeshType>::PerFace(m);
  }
  while( repeat && count );
  return total;
}
// Remove T-vertices by edge flipping.
// A T-vertex is detected when the vertex opposite the longest side of a face
// lies almost on that side (distance * threshold <= side length). The flip is
// applied only when it improves the worst triangle quality of the pair.
// Returns the total number of flips; iterates until convergence when repeat
// is true. Side effects: rebuilds FF topology and uses the per-vertex marks.
static int RemoveTVertexByFlip(MeshType &m, float threshold=40, bool repeat=true)
{
  RequireFFAdjacency(m);
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;
  do {
    tri::UpdateTopology<MeshType>::FaceFace(m);
    tri::UnMarkAll(m);
    count = 0;
    //detection stage
    for(unsigned int index = 0 ; index < m.face.size(); ++index )
    {
      FacePointer f = &(m.face[index]); float sides[3]; CoordType dummy;
      sides[0] = Distance(f->P(0), f->P(1));
      sides[1] = Distance(f->P(1), f->P(2));
      sides[2] = Distance(f->P(2), f->P(0));
      // Find largest triangle side
      int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
      if( tri::IsMarked(m,f->V2(i) )) continue;   // already handled this round
      // Vertex opposite side i lies (nearly) on that side -> T-vertex.
      if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
      {
        tri::Mark(m,f->V2(i));
        if(face::CheckFlipEdge<FaceType>( *f, i )) {
          // Check if EdgeFlipping improves quality
          FacePointer g = f->FFp(i); int k = f->FFi(i);
          // t1,t2: current pair of faces; t3,t4: the pair after the flip.
          Triangle3<ScalarType> t1(f->P(i), f->P1(i), f->P2(i)), t2(g->P(k), g->P1(k), g->P2(k)),
              t3(f->P(i), g->P2(k), f->P2(i)), t4(g->P(k), f->P2(i), g->P2(k));
          if ( std::min( QualityFace(t1), QualityFace(t2) ) < std::min( QualityFace(t3), QualityFace(t4) ))
          {
            face::FlipEdge<FaceType>( *f, i );
            ++count; ++total;
          }
        }
      }
    }
    // tri::UpdateNormal<MeshType>::PerFace(m);
  }
  while( repeat && count );
  return total;
}
// Remove T-vertices by collapsing them onto the nearest endpoint of the edge
// they lie on (detection criterion as in RemoveTVertexByFlip).
// Returns the total number of collapses; iterates until convergence when
// repeat is true. Side effects: removes duplicated vertices and compacts the
// vertex/face containers (pointers into the mesh are invalidated).
static int RemoveTVertexByCollapse(MeshType &m, float threshold=40, bool repeat=true)
{
  RequirePerVertexMark(m);
  //Counters for logging and convergence
  int count, total = 0;
  do {
    tri::UnMarkAll(m);
    count = 0;
    //detection stage
    for(unsigned int index = 0 ; index < m.face.size(); ++index )
    {
      FacePointer f = &(m.face[index]);
      float sides[3];
      CoordType dummy;
      sides[0] = Distance(f->P(0), f->P(1));
      sides[1] = Distance(f->P(1), f->P(2));
      sides[2] = Distance(f->P(2), f->P(0));
      // Index of the largest side of the triangle.
      int i = std::find(sides, sides+3, std::max( std::max(sides[0],sides[1]), sides[2])) - (sides);
      if( tri::IsMarked(m,f->V2(i) )) continue;
      if( PSDist(f->P2(i),f->P(i),f->P1(i),dummy)*threshold <= sides[i] )
      {
        tri::Mark(m,f->V2(i));
        // Snap the T-vertex onto the closer endpoint of the long side
        // (dummy holds the projection of the vertex onto that side).
        int j = Distance(dummy,f->P(i))<Distance(dummy,f->P1(i))?i:(i+1)%3;
        f->P2(i) = f->P(j); tri::Mark(m,f->V(j));
        ++count; ++total;
      }
    }
    // The collapse created coincident vertices: merge them and compact.
    tri::Clean<MeshType>::RemoveDuplicateVertex(m);
    tri::Allocator<MeshType>::CompactFaceVector(m);
    tri::Allocator<MeshType>::CompactVertexVector(m);
  }
  while( repeat && count );
  return total;
}
/// Collect in ret the faces of m involved in self-intersections.
/// Uses a uniform grid to restrict candidate pairs to faces whose bounding
/// boxes overlap, then delegates the exact test to TestFaceFaceIntersection.
/// Returns true iff at least one intersection was found. Each intersecting
/// face can appear multiple times in ret (once per intersecting partner, plus
/// once as the "current" face). Uses a temporary per-face user bit so every
/// unordered pair is tested only once.
static bool SelfIntersections(MeshType &m, std::vector<FaceType*> &ret)
{
  RequirePerFaceMark(m);
  ret.clear();
  int referredBit = FaceType::NewBitFlag();
  tri::UpdateFlags<MeshType>::FaceClear(m,referredBit);
  TriMeshGrid gM;
  gM.Set(m.face.begin(),m.face.end());
  for(FaceIterator fi=m.face.begin();fi!=m.face.end();++fi) if(!(*fi).IsD())
  {
    // Mark fi as already processed so later faces skip the symmetric test.
    (*fi).SetUserBit(referredBit);
    Box3< ScalarType> bbox;
    (*fi).GetBBox(bbox);
    std::vector<FaceType*> inBox;
    vcg::tri::GetInBoxFace(m, gM, bbox,inBox);
    bool Intersected=false;
    typename std::vector<FaceType*>::iterator fib;
    for(fib=inBox.begin();fib!=inBox.end();++fib)
    {
      if(!(*fib)->IsUserBit(referredBit) && (*fib != &*fi) )
        if(Clean<MeshType>::TestFaceFaceIntersection(&*fi,*fib)){
          ret.push_back(*fib);
          if(!Intersected) {
            ret.push_back(&*fi);   // add fi itself only once
            Intersected=true;
          }
        }
    }
    inBox.clear();
  }
  FaceType::DeleteBitFlag(referredBit);
  return (ret.size()>0);
}
/**
This function simply tests that the vn, en and fn counters are consistent with the sizes of the containers and the number of deleted simplexes.
*/
// Verify that vn/en/fn plus the number of deleted elements equals the size of
// the corresponding container for vertices, edges and faces.
static bool IsSizeConsistent(MeshType &m)
{
  int deletedV = 0, deletedE = 0, deletedF = 0;
  for (VertexIterator vi = m.vert.begin(); vi != m.vert.end(); ++vi)
    if (vi->IsD()) ++deletedV;
  for (EdgeIterator ei = m.edge.begin(); ei != m.edge.end(); ++ei)
    if (ei->IsD()) ++deletedE;
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if (fi->IsD()) ++deletedF;
  return size_t(m.vn + deletedV) == m.vert.size()
      && size_t(m.en + deletedE) == m.edge.size()
      && size_t(m.fn + deletedF) == m.face.size();
}
/**
This function simply tests that all the faces have a consistent face-face topology relation.
Useful for checking that a topology-modifying algorithm has not corrupted anything.
*/
// Return true iff every edge of every live face passes the FFCorrectness test.
static bool IsFFAdjacencyConsistent(MeshType &m)
{
  RequireFFAdjacency(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    for (int e = 0; e < 3; ++e)
      if (!FFCorrectness(*fi, e))
        return false;
  }
  return true;
}
/**
This function simply test that a mesh has some reasonable tex coord.
*/
// Sanity check for wedge texture coords: within each face the three wedges
// must reference the same, non-negative texture index.
static bool HasConsistentPerWedgeTexCoord(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
  {
    if (fi->IsD())
      continue;
    FaceType &f = *fi;
    if (f.WT(0).N() != f.WT(1).N() || f.WT(0).N() != f.WT(2).N())
      return false;  // all the wedges of a face must share the texture index
    if (f.WT(0).N() < 0)
      return false;  // no undefined texture should be allowed
  }
  return true;
}
/**
Simple check that there are no face with all collapsed tex coords.
*/
/**
Simple check that there are no faces with all collapsed tex coords.
NOTE(review): despite the 'Has...' name, this returns FALSE as soon as a face
whose three wedge tex coords coincide is found, and TRUE otherwise — i.e.
true means "no fully collapsed tex-coord face exists". The inverted meaning is
kept as-is for backward compatibility; verify against callers before renaming.
*/
static bool HasZeroTexCoordFace(MeshType &m)
{
  tri::RequirePerFaceWedgeTexCoord(m);
  for (FaceIterator fi = m.face.begin(); fi != m.face.end(); ++fi)
    if(!(*fi).IsD())
    {
      // A face whose three wedge tex coords are identical has zero UV area.
      if( (*fi).WT(0).P() == (*fi).WT(1).P() && (*fi).WT(0).P() == (*fi).WT(2).P() ) return false;
    }
  return true;
}
/**
This function test if two triangular faces of a mesh intersect.
It assumes that the faces (as storage) are different (e.g different address)
If the two faces are different but coincident (same set of vertexes) return true.
if the faces share an edge no test is done.
if the faces share only a vertex, the opposite edge is tested against the face
*/
/**
This function tests if two triangular faces of a mesh intersect.
It assumes that the faces (as storage) are different (e.g. different address).
If the two faces are different but coincident (same set of vertexes) it returns true.
If the faces share an edge (sv==2) no test is done and false is returned.
If the faces share only a vertex, the opposite edge is tested against the face.
*/
static bool TestFaceFaceIntersection(FaceType *f0,FaceType *f1)
{
  int sv = face::CountSharedVertex(f0,f1);
  if(sv==3) return true;   // coincident faces
  if(sv==0) return (vcg::IntersectionTriangleTriangle<FaceType>((*f0),(*f1)));
  // if the faces share only a vertex, the opposite edge (as a segment) is tested against the face
  // to avoid degenerate cases where the two triangles have the opposite edge on a common plane
  // we offset the segment to test toward the shared vertex
  if(sv==1)
  {
    int i0,i1; ScalarType a,b;
    face::FindSharedVertex(f0,f1,i0,i1);
    // Shrink the opposite edge halfway toward the shared vertex before testing.
    CoordType shP = f0->V(i0)->P()*0.5;
    if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f0).V1(i0)->P()*0.5+shP,(*f0).V2(i0)->P()*0.5+shP), *f1, a, b) )
    {
      // a,b are the param coords of the intersection point of the segment.
      // NOTE(review): EPSIL is a tolerance constant defined elsewhere in this
      // file/class — intersections too close to the segment ends are ignored.
      if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
      return true;
    }
    // Symmetric test: edge of f1 opposite the shared vertex against f0.
    if(vcg::IntersectionSegmentTriangle(Segment3<ScalarType>((*f1).V1(i1)->P()*0.5+shP,(*f1).V2(i1)->P()*0.5+shP), *f0, a, b) )
    {
      // a,b are the param coords of the intersection point of the segment.
      if(a+b>=1 || a<=EPSIL || b<=EPSIL ) return false;
      return true;
    }
  }
  return false;  // shared-edge case (sv==2) falls through to here
}
/**
This function merge all the vertices that are closer than the given radius
*/
// Merge all vertices closer than the given radius: first snap each cluster
// onto a single position, then unify the now-coincident vertices.
// Returns the number of vertices moved by the clustering pass.
static int MergeCloseVertex(MeshType &m, const ScalarType radius)
{
  const int clusteredCnt = ClusterVertex(m, radius);
  RemoveDuplicateVertex(m, true);
  return clusteredCnt;
}
/// Snap every vertex within `radius` of a seed vertex onto the seed position.
/// A spatial hash restricts the neighbourhood queries; the V flag marks
/// vertices already assigned to a cluster. Returns the number of moved
/// vertices. Side effects: compacts the vertex vector and clobbers V flags.
static int ClusterVertex(MeshType &m, const ScalarType radius)
{
  if(m.vn==0) return 0;
  // some spatial indexing structure does not work well with deleted vertices...
  tri::Allocator<MeshType>::CompactVertexVector(m);
  typedef vcg::SpatialHashTable<VertexType, ScalarType> SampleSHT;
  SampleSHT sht;
  tri::EmptyTMark<MeshType> markerFunctor;
  std::vector<VertexType*> closests;
  int mergedCnt=0;
  sht.Set(m.vert.begin(), m.vert.end());
  UpdateFlags<MeshType>::VertexClearV(m);
  for(VertexIterator viv = m.vert.begin(); viv!= m.vert.end(); ++viv)
    if(!(*viv).IsD() && !(*viv).IsV())
    {
      // viv becomes the seed of a new cluster.
      (*viv).SetV();
      Point3<ScalarType> p = viv->cP();
      Box3<ScalarType> bb(p-Point3<ScalarType>(radius,radius,radius),p+Point3<ScalarType>(radius,radius,radius));
      GridGetInBox(sht, markerFunctor, bb, closests);
      // qDebug("Vertex %i has %i closest", &*viv - &*m.vert.begin(),closests.size());
      for(size_t i=0; i<closests.size(); ++i)
      {
        ScalarType dist = Distance(p,closests[i]->cP());
        // Only unclaimed vertices strictly inside the radius are snapped.
        if(dist < radius && !closests[i]->IsV())
        {
          // printf("%f %f \n",dist,radius);
          mergedCnt++;
          closests[i]->SetV();
          closests[i]->P()=p;   // move onto the seed position
        }
      }
    }
  return mergedCnt;
}
/// Delete every connected component with strictly fewer than maxCCSize faces.
/// Returns (total number of components, number of deleted components).
static std::pair<int,int> RemoveSmallConnectedComponentsSize(MeshType &m, int maxCCSize)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    std::vector<typename MeshType::FacePointer> FPV;
    if(CCV[i].first<maxCCSize)   // strictly smaller components are removed
    {
      DeletedCC++;
      // Collect the faces of the component first, then delete them: deleting
      // while iterating the component would invalidate the traversal.
      for(ci.start(m,CCV[i].second);!ci.completed();++ci)
        FPV.push_back(*ci);
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}
/// Remove the connected components smaller than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
/// Remove the connected components whose bounding-box diagonal is smaller than
/// the given diameter.
/// Returns (total number of components, number of deleted components).
static std::pair<int,int> RemoveSmallConnectedComponentsDiameter(MeshType &m, ScalarType maxDiameter)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  tri::ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    Box3<ScalarType> bb;
    std::vector<typename MeshType::FacePointer> FPV;
    // One pass over the component both gathers its faces and grows its bbox.
    for(ci.start(m,CCV[i].second);!ci.completed();++ci)
    {
      FPV.push_back(*ci);
      bb.Add((*ci)->P(0));
      bb.Add((*ci)->P(1));
      bb.Add((*ci)->P(2));
    }
    if(bb.Diag()<maxDiameter)
    {
      DeletedCC++;
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}
/// Remove the connected components greater than a given diameter
// it returns a pair with the number of connected components and the number of deleted ones.
/// Remove the connected components whose bounding-box diagonal is larger than
/// the given diameter.
/// Returns (total number of components, number of deleted components).
static std::pair<int,int> RemoveHugeConnectedComponentsDiameter(MeshType &m, ScalarType minDiameter)
{
  std::vector< std::pair<int, typename MeshType::FacePointer> > CCV;
  int TotalCC=ConnectedComponents(m, CCV);
  int DeletedCC=0;
  tri::ConnectedComponentIterator<MeshType> ci;
  for(unsigned int i=0;i<CCV.size();++i)
  {
    // FIX: this was Box3f, hard-coding float precision regardless of the mesh
    // scalar type; use Box3<ScalarType> for consistency with the sibling
    // RemoveSmallConnectedComponentsDiameter and correctness on double meshes.
    Box3<ScalarType> bb;
    std::vector<typename MeshType::FacePointer> FPV;
    // One pass over the component both gathers its faces and grows its bbox.
    for(ci.start(m,CCV[i].second);!ci.completed();++ci)
    {
      FPV.push_back(*ci);
      bb.Add((*ci)->P(0));
      bb.Add((*ci)->P(1));
      bb.Add((*ci)->P(2));
    }
    if(bb.Diag()>minDiameter)
    {
      DeletedCC++;
      typename std::vector<typename MeshType::FacePointer>::iterator fpvi;
      for(fpvi=FPV.begin(); fpvi!=FPV.end(); ++fpvi)
        tri::Allocator<MeshType>::DeleteFace(m,(**fpvi));
    }
  }
  return std::make_pair(TotalCC,DeletedCC);
}
/**
Select the folded faces using an angle threshold on the face normal.
The face is selected if the dot product between the face normal and the normal of the plane fitted
using the vertices of the one ring faces is below the cosThreshold.
The cosThreshold requires a negative cosine value (a positive value is clamp to zero).
*/
/**
Select the folded faces using an angle threshold on the face normal.
A face is selected when the dot product between its normal and the normal of a
plane fitted to the vertices of its one-ring is below cosThreshold.
cosThreshold must be a negative cosine value (a positive value is clamped to 0).
Side effects: clears the face selection, recomputes face/vertex normals and
the VF topology.
*/
static void SelectFoldedFaceFromOneRingFaces(MeshType &m, ScalarType cosThreshold)
{
  typedef std::unordered_set<VertexPointer> VertexSet;
  tri::RequireVFAdjacency(m);
  tri::RequirePerFaceNormal(m);
  tri::RequirePerVertexNormal(m);
  vcg::tri::UpdateSelection<MeshType>::FaceClear(m);
  vcg::tri::UpdateNormal<MeshType>::PerFaceNormalized(m);
  vcg::tri::UpdateNormal<MeshType>::PerVertexNormalized(m);
  vcg::tri::UpdateTopology<MeshType>::VertexFace(m);
  if (cosThreshold > 0)
    cosThreshold = 0;
  // Faces are independent: each iteration only writes its own face's S flag.
  #pragma omp parallel for schedule(dynamic, 10)
  for (int i = 0; i < m.face.size(); i++)
  {
    VertexSet nearVertex;
    std::vector<CoordType> pointVec;
    FacePointer f = &m.face[i];
    // Gather the one-ring vertices of the three corners (deduplicated).
    for (int j = 0; j < 3; j++)
    {
      std::vector<VertexPointer> temp;
      vcg::face::VVStarVF<FaceType>(f->V(j), temp);
      typename std::vector<VertexPointer>::iterator iter = temp.begin();
      for (; iter != temp.end(); iter++)
      {
        if ((*iter) != f->V1(j) && (*iter) != f->V2(j))
        {
          if (nearVertex.insert((*iter)).second)
            pointVec.push_back((*iter)->P());
        }
      }
      nearVertex.insert(f->V(j));
      pointVec.push_back(f->P(j));
    }
    if (pointVec.size() > 3)
    {
      vcg::Plane3<ScalarType> plane;
      vcg::FitPlaneToPointSet(pointVec, plane);
      // Orient the fitted plane normal to agree with the average vertex normal.
      float avgDot = 0;
      for (auto nvp : nearVertex)
        avgDot += plane.Direction().dot(nvp->N());
      avgDot /= nearVertex.size();
      typename MeshType::VertexType::NormalType normal;
      if (avgDot < 0)
        normal = -plane.Direction();
      else
        normal = plane.Direction();
      // A face whose normal opposes the local fitted plane is folded.
      if (normal.dot(f->N()) < cosThreshold)
        f->SetS();
    }
  }
}
/**
Select the faces on the first mesh that intersect the second mesh.
It uses a grid for querying so a face::mark should be added.
*/
/**
Select the faces of m1 that intersect m2.
A grid over m2 restricts the exact tests to faces with overlapping bounding
boxes (this is why m2 needs a per-face mark). Returns the number of
intersections found (a face of m1 is counted once per intersecting face of m2).
*/
static int SelectIntersectingFaces(MeshType &m1, MeshType &m2)
{
  RequirePerFaceMark(m2);
  RequireCompactness(m1);
  RequireCompactness(m2);
  tri::UpdateSelection<MeshType>::FaceClear(m1);
  TriMeshGrid grid;
  grid.Set(m2.face.begin(), m2.face.end());
  int selectedCnt = 0;
  for (auto fi = m1.face.begin(); fi != m1.face.end(); ++fi)
  {
    Box3<ScalarType> box;
    fi->GetBBox(box);
    std::vector<FaceType*> candidates;
    vcg::tri::GetInBoxFace(m2, grid, box, candidates);
    for (auto cand = candidates.begin(); cand != candidates.end(); ++cand)
    {
      if (Clean<MeshType>::TestFaceFaceIntersection(&*fi, *cand))
      {
        fi->SetS();
        ++selectedCnt;
      }
    }
  }
  return selectedCnt;
}
}; // end class
/*@}*/
} //End Namespace Tri
} // End Namespace vcg
#endif
|
collective_alltoall.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Implements the alltoall mixed mode OpenMP/MPI benchmark. */
/*-----------------------------------------------------------*/
#include "collective_alltoall.h"

#include <stdio.h>
#include <stdlib.h>
/*-----------------------------------------------------------*/
/* alltoall */
/* */
/* Driver routine for the alltoall benchmark. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* alltoall                                                  */
/*                                                           */
/* Driver routine for the alltoall benchmark: for each data  */
/* size it allocates the buffers, warms up, verifies the     */
/* result, then times repeated kernel executions until the   */
/* target time is reached, reporting from rank 0.            */
/* NOTE(review): repsToDo, benchComplete, comm, timers, etc. */
/* are globals presumably defined by the benchmark framework */
/* headers — confirm in collective_alltoall.h.               */
/*-----------------------------------------------------------*/
int alltoall(){
	int dataSizeIter;
	int bufferSize;
	/* Initialise repsToDo to defaultReps */
	repsToDo = defaultReps;
	/* Start loop over data sizes */
	dataSizeIter = minDataSize; /* initialise dataSizeIter */
	while (dataSizeIter <= maxDataSize){
		/* Calculate bufferSize and allocate space for
		 * the data arrays.
		 * Per process: dataSize items per thread pair, i.e.
		 * dataSize * numThreads * numThreads per peer process
		 * (this matches sizeofBuffer in testAlltoall).
		 */
		bufferSize = dataSizeIter * numThreads * \
			numMPIprocs * numThreads;
		allocateAlltoallData(bufferSize);
		/* Perform warm-up of benchmark */
		alltoallKernel(warmUpIters, dataSizeIter);
		/* Test if alltoall was successful */
		testAlltoall(dataSizeIter);
		/* Initialise the benchmark */
		benchComplete = FALSE;
		/* Execute benchmark until target time is reached */
		while (benchComplete != TRUE){
			/* Start timer */
			MPI_Barrier(comm);
			startTime = MPI_Wtime();
			/* Execute alltoall for repsToDo repetitions */
			alltoallKernel(repsToDo, dataSizeIter);
			/* Stop timer */
			MPI_Barrier(comm);
			finishTime = MPI_Wtime();
			totalTime = finishTime - startTime;
			/* Test if target time was reached */
			if (myMPIRank==0){
				benchComplete = repTimeCheck(totalTime, repsToDo);
			}
			/* Ensure all procs have the same value of benchComplete */
			/* and repsToDo */
			MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
			MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
		}
		/* Master process sets benchmark result for reporting */
		if (myMPIRank == 0){
			setReportParams(dataSizeIter, repsToDo, totalTime);
			printReport();
		}
		/* Free allocated data */
		freeAlltoallData();
		/* Double data size and loop again */
		dataSizeIter = dataSizeIter * 2;
	}
	return 0;
}
/*-----------------------------------------------------------*/
/* alltoallKernel */
/* */
/* Implements the all to all benchmark. */
/* Each thread sends/receives dataSize items to/from */
/* every other process. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* alltoallKernel                                            */
/*                                                           */
/* Implements the all to all benchmark.                      */
/* Each thread sends/receives dataSize items to/from every   */
/* other thread of every process: threads fill their slice   */
/* of the send buffer, one MPI_Alltoall moves the data, and  */
/* threads scatter their portion into the final buffer.      */
/* NOTE(review): myThreadID is used inside the parallel      */
/* regions without appearing in the data-sharing clauses —   */
/* presumably a threadprivate global or macro; confirm.      */
/*-----------------------------------------------------------*/
int alltoallKernel(int totalReps, int dataSize){
	int repIter, i, j;
	int dataForEachProc, numsToWrite;
	int blockNum, startOffset;
	/* Calculate how much data each thread sends to each process */
	numsToWrite = numThreads * dataSize;
	/* Calculate total amount of data each process receives
	 * from any other process....
	 * ...each thread gets dataSize items from every other thread.
	 */
	dataForEachProc = numThreads * numThreads * dataSize;
	for (repIter=0; repIter<totalReps; repIter++){
		/* Each thread writes numsToWrite items for each
		 * MPI process to alltoallSendBuf.
		 */
#pragma omp parallel default(none) \
	private(blockNum,i,j) \
	shared(numsToWrite,dataForEachProc,globalIDarray) \
	shared(alltoallSendBuf,numMPIprocs)
		{
			/* Calculate the blockNum of each thread.
			 * This is used to find which portion of the
			 * dataForEachProc elements a thread will
			 * be responsible for.
			 */
			blockNum = (myThreadID)* numsToWrite;
			/* Write threadID to correct location in
			 * alltoallSendBuf.
			 */
			for (i=0; i<numMPIprocs; i++){ /* loop over MPI processes */
				for (j=0; j<numsToWrite; j++){ /* loop over data to write */
					alltoallSendBuf[blockNum +(i * dataForEachProc) + j] = \
						globalIDarray[myThreadID];
				}
			}
		}
		/* Call MPI_AlltoAll */
		MPI_Alltoall(alltoallSendBuf, dataForEachProc, MPI_INT, \
			alltoallRecvBuf, dataForEachProc, MPI_INT, \
			comm);
		/* Each thread now reads the receive buffer so that it gets
		 * dataSize values from every other thread in its portion
		 * of alltoallFinalBuf.
		 */
#pragma omp parallel default(none) \
	private(blockNum,startOffset,i,j) \
	shared(alltoallRecvBuf,alltoallFinalBuf,numMPIprocs) \
	shared(dataForEachProc,numsToWrite,dataSize,globalIDarray) \
	shared(numThreads)
		{
			/* Calculate the blockNum.
			 * This identifies which portion of the data from each
			 * process a thread is responsible for.
			 */
			blockNum = myThreadID * dataSize;
			/* Calculate the offset into each MPI processes finalBuf
			 * where each thread will start to read its data.
			 */
			startOffset = (numsToWrite * numMPIprocs) * myThreadID;
			/* Loop over all processors (threads & proceeses). */
			for (i=0; i<(numThreads * numMPIprocs); i++){
				for (j=0; j<dataSize; j++){
					alltoallFinalBuf[startOffset + (i * dataSize) + j] = \
						alltoallRecvBuf[blockNum + (i * numsToWrite) + j];
				}
			}
		}
	}
	return 0;
}
/*-----------------------------------------------------------*/
/* allocateAlltoallData */
/* */
/* Allocates memory for the main data arrays used in the */
/* alltoall benchmark. */
/*-----------------------------------------------------------*/
int allocateAlltoallData(int bufferSize){
	alltoallSendBuf = (int *) malloc(bufferSize * sizeof(int));
	alltoallRecvBuf = (int *) malloc(bufferSize * sizeof(int));
	alltoallFinalBuf = (int *) malloc(bufferSize * sizeof(int));
	/* FIX: the original ignored allocation failure, letting the kernels
	 * dereference NULL later. Abort with a clear message instead. */
	if (alltoallSendBuf == NULL || alltoallRecvBuf == NULL ||
	    alltoallFinalBuf == NULL){
		fprintf(stderr,
			"ERROR: failed to allocate alltoall buffers of %d ints\n",
			bufferSize);
		exit(EXIT_FAILURE);
	}
	return 0;
}
/*-----------------------------------------------------------*/
/* freeAlltoallData */
/* */
/* Free memory of the main data arrays. */
/*-----------------------------------------------------------*/
int freeAlltoallData(){
	free(alltoallSendBuf);
	free(alltoallRecvBuf);
	free(alltoallFinalBuf);
	/* Reset the global pointers so a stray use-after-free or a double call
	 * of this function fails fast instead of corrupting the heap
	 * (free(NULL) is a no-op). */
	alltoallSendBuf = NULL;
	alltoallRecvBuf = NULL;
	alltoallFinalBuf = NULL;
	return 0;
}
/*-----------------------------------------------------------*/
/* testAlltoall */
/* */
/* Verifies that the all to all completed successfully. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* testAlltoall                                              */
/*                                                           */
/* Verifies that the all to all completed successfully:      */
/* builds the expected final buffer (each slot holds the     */
/* global index of the thread the data came from), compares  */
/* element-wise, and reduces the per-rank outcome with a     */
/* logical AND onto rank 0.                                  */
/*-----------------------------------------------------------*/
int testAlltoall(int dataSize){
	int sizeofBuffer, i, j;
	int dataForEachThread, startElem;
	int testFlag, reduceFlag;
	int *testBuf;
	/* Set testFlag to true */
	testFlag = TRUE;
	/* calculate the size of the buffer on each process and allocate */
	sizeofBuffer = dataSize * numThreads * numMPIprocs * numThreads;
	/* NOTE(review): malloc result is not checked here (consistent with the
	 * rest of this file); a failure would crash in the loops below. */
	testBuf = (int *) malloc(sizeofBuffer * sizeof(int));
	/* Calculate how many elements each thread will work with */
	dataForEachThread = dataSize * numThreads * numMPIprocs;
	/* Fill buffer with expected values. */
#pragma omp parallel default(none) \
	private(i,j,startElem) \
	shared(testBuf,globalIDarray,sizeofBuffer,dataSize) \
	shared(numThreads,numMPIprocs,dataForEachThread)
	{
		/* Calculate start element for each thread */
		startElem = (myThreadID) * dataForEachThread;
		/* Expected value: block i of each thread's portion was written by
		 * global thread i, whose payload is its global index. */
		for (i=0; i<(numThreads * numMPIprocs); i++){
			for (j=0; j<dataSize; j++){
				testBuf[startElem + (i * dataSize) + j] = i;
			}
		}
	}
	/* Compare */
	for (i=0; i<sizeofBuffer; i++){
		if (alltoallFinalBuf[i] != testBuf[i]){
			testFlag = FALSE;
		}
	}
	/* Reduce testFlag with logical AND operator to
	 * get overall test result.
	 */
	MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
	/* Master then sets testOutcome flag */
	if (myMPIRank == 0){
		setTestOutcome(reduceFlag);
	}
	/* Free space for testBuf */
	free(testBuf);
	return 0;
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that (x->tv_usec - y->tv_usec) ends up in a range where
     * the final subtraction yields a non-negative tv_usec.
     * NOTE: y is modified in place, exactly as in the classic GNU libc
     * "Calculating Elapsed Time" example this follows. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Return 1 if the difference is negative, otherwise 0. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) axis-symmetric variable-coefficient
 * 3D stencil benchmark.
 *
 * Usage: prog Nx Ny Nz [Nt]
 *   Nx, Ny, Nz  interior grid size (8 ghost cells are added per axis)
 *   Nt          number of time steps
 *
 * Prints per-test wall-clock time and a PRINT_RESULTS summary.
 * Returns 0.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults give an empty interior / zero time steps so that running with
   * too few arguments is well defined (previously these were read
   * uninitialized, which is undefined behavior). */
  int Nx = 8, Ny = 8, Nz = 8, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the two time planes of the grid, A[t%2][z][y][x]
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // 13 coefficient arrays: center + 4 symmetric rings per axis
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  // initialize grid and coefficients with reproducible pseudo-random data
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 1; i < Nz; i++) {
      for (j = 1; j < Ny; j++) {
        for (k = 1; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial reference execution of the radius-4 stencil
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fixed: was lowercase `min(...)`, but only the MIN macro is defined
     * in this file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays, including the top-level pointers that
  // previously leaked.
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' *y from *x, storing the result in *result.
 * Side effect: *y is normalized in place while balancing the carry between
 * the seconds and microseconds fields (GNU libc manual idiom).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* First normalization direction: x's usec smaller than y's. */
  if (x->tv_usec < y->tv_usec)
  {
    int whole = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += whole;
    y->tv_usec -= 1000000 * whole;
  }
  /* Second direction: usec difference larger than one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int whole = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= whole;
    y->tv_usec += 1000000 * whole;
  }
  /* tv_usec of the result is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1 (7-point) variable-coefficient 3D stencil
 * benchmark.
 *
 * Usage: prog Nx Ny Nz [Nt]
 *   Nx, Ny, Nz  interior grid size (2 ghost cells are added per axis)
 *   Nt          number of time steps
 *
 * Prints per-test wall-clock time and a PRINT_RESULTS summary.
 * Returns 0.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults give an empty interior / zero time steps so that running with
   * too few arguments is well defined (previously these were read
   * uninitialized, which is undefined behavior). */
  int Nx = 2, Ny = 2, Nz = 2, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the two time planes of the grid, A[t%2][z][y][x]
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // 7 coefficient arrays: one per point of the 7-point stencil
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  // initialize grid and coefficients with reproducible pseudo-random data
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 1; i < Nz; i++) {
      for (j = 1; j < Ny; j++) {
        for (k = 1; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial reference execution of the 7-point stencil
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
                                  coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
                                  coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
                                  coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
                                  coef[6][i][j][k] * A[t%2][i ][j ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fixed: was lowercase `min(...)`, but only the MIN macro is defined
     * in this file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays, including the top-level pointers that
  // previously leaked.
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
nanort.h | //
// NanoRT, single header only modern ray tracing kernel.
//
/*
The MIT License (MIT)
Copyright (c) 2015 Light Transport Entertainment, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#pragma once
#define _CRT_SECURE_NO_WARNINGS
#ifndef __NANORT_H__
#define __NANORT_H__
#include <vector>
#include <queue>
#include <cmath>
#include <limits>
#include <cstdlib>
#include <cstring>
#include <string>
namespace nanort {
// Parallelized BVH build is not yet fully tested,
// thus turn off if you face a problem when building BVH.
#define NANORT_ENABLE_PARALLEL_BUILD (0)
// Small vector class useful for multi-threaded environment.
//
// stack_container.h
//
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//#include "base/basictypes.h"
// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
// some situations.
//
// STL likes to make copies of allocators, so the allocator itself can't hold
// the data. Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data. Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
// STL allocator that serves the FIRST fitting allocation from a
// caller-owned stack buffer (StackAllocator::Source) and falls through to
// std::allocator<T> for everything else. Copies of the allocator share the
// same Source pointer, so containers copied from each other share the one
// stack buffer. See the long design comment above.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
 public:
  typedef typename std::allocator<T>::pointer pointer;
  typedef typename std::allocator<T>::size_type size_type;
  // Backing store for the allocator. The container owner is responsible for
  // maintaining this for as long as any containers using this allocator are
  // live.
  struct Source {
    Source() : used_stack_buffer_(false) {}
    // Casts the buffer in its right type.
    T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
    const T *stack_buffer() const {
      return reinterpret_cast<const T *>(stack_buffer_);
    }
    //
    // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
    // since it is used to mimic an array of T.
    // Be careful while declaring any unaligned types (like bool)
    // before stack_buffer_.
    //
    // The buffer itself. It is not of type T because we don't want the
    // constructors and destructors to be automatically called. Define a POD
    // buffer of the right size instead.
    char stack_buffer_[sizeof(T[stack_capacity])];
    // Set when the stack buffer is used for an allocation. We do not track
    // how much of the buffer is used, only that somebody is using it.
    bool used_stack_buffer_;
  };
  // Used by containers when they want to refer to an allocator of type U.
  template <typename U> struct rebind {
    typedef StackAllocator<U, stack_capacity> other;
  };
  // For the straight up copy c-tor, we can share storage.
  StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
      : source_(rhs.source_) {}
  // ISO C++ requires the following constructor to be defined,
  // and std::vector in VC++2008SP1 Release fails with an error
  // in the class _Container_base_aux_alloc_real (from <xutility>)
  // if the constructor does not exist.
  // For this constructor, we cannot share storage; there's
  // no guarantee that the Source buffer of Ts is large enough
  // for Us.
  // TODO: If we were fancy pants, perhaps we could share storage
  // iff sizeof(T) == sizeof(U).
  template <typename U, size_t other_capacity>
  StackAllocator(const StackAllocator<U, other_capacity> &other)
      : source_(NULL) {}
  // Construct with the Source whose stack buffer this allocator may use.
  explicit StackAllocator(Source *source) : source_(source) {}
  // Actually do the allocation. Use the stack buffer if nobody has used it yet
  // and the size requested fits. Otherwise, fall through to the standard
  // allocator.
  pointer allocate(size_type n, void *hint = 0) {
    if (source_ != NULL && !source_->used_stack_buffer_ &&
        n <= stack_capacity) {
      source_->used_stack_buffer_ = true;
      return source_->stack_buffer();
    } else {
      return std::allocator<T>::allocate(n, hint);
    }
  }
  // Free: when trying to free the stack buffer, just mark it as free. For
  // non-stack-buffer pointers, just fall though to the standard allocator.
  void deallocate(pointer p, size_type n) {
    if (source_ != NULL && p == source_->stack_buffer())
      source_->used_stack_buffer_ = false;
    else
      std::allocator<T>::deallocate(p, n);
  }

 private:
  // Shared, caller-owned backing store; NULL when no stack buffer is available.
  Source *source_;
};
// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
// Wraps an STL container together with a StackAllocator::Source so that the
// container's initial `stack_capacity` elements live on the stack; growing
// beyond that transparently overflows onto the heap. The ContainerType MUST
// already use the matching StackAllocator (see wrappers below).
template <typename TContainerType, int stack_capacity> class StackContainer {
 public:
  typedef TContainerType ContainerType;
  typedef typename ContainerType::value_type ContainedType;
  typedef StackAllocator<ContainedType, stack_capacity> Allocator;
  // Allocator must be constructed before the container!
  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
    // Make the container use the stack allocation by reserving our buffer size
    // before doing anything else.
    container_.reserve(stack_capacity);
  }
  // Getters for the actual container.
  //
  // Danger: any copies of this made using the copy constructor must have
  // shorter lifetimes than the source. The copy will share the same allocator
  // and therefore the same stack buffer as the original. Use std::copy to
  // copy into a "real" container for longer-lived objects.
  ContainerType &container() { return container_; }
  const ContainerType &container() const { return container_; }
  // Support operator-> to get to the container. This allows nicer syntax like:
  //   StackContainer<...> foo;
  //   std::sort(foo->begin(), foo->end());
  ContainerType *operator->() { return &container_; }
  const ContainerType *operator->() const { return &container_; }
#ifdef UNIT_TEST
  // Retrieves the stack source so that that unit tests can verify that the
  // buffer is being used properly.
  const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif

 protected:
  // Declaration order matters: stack_data_ before allocator_ before container_.
  typename Allocator::Source stack_data_;
  Allocator allocator_;
  ContainerType container_;
  // DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
  StackContainer(const StackContainer &) = delete;
  void operator=(const StackContainer &) = delete;
};
// StackString
// std::string variant whose first `stack_capacity` chars live on the stack.
template <size_t stack_capacity>
class StackString
    : public StackContainer<
          std::basic_string<char, std::char_traits<char>,
                            StackAllocator<char, stack_capacity> >,
          stack_capacity> {
 public:
  StackString()
      : StackContainer<std::basic_string<char, std::char_traits<char>,
                                         StackAllocator<char, stack_capacity> >,
                       stack_capacity>() {}

 private:
  // DISALLOW_EVIL_CONSTRUCTORS(StackString);
  StackString(const StackString &);
  void operator=(const StackString &);
};
// StackWString
// Wide-string variant of StackString (wchar_t elements on the stack buffer).
template <size_t stack_capacity>
class StackWString
    : public StackContainer<
          std::basic_string<wchar_t, std::char_traits<wchar_t>,
                            StackAllocator<wchar_t, stack_capacity> >,
          stack_capacity> {
 public:
  StackWString()
      : StackContainer<
            std::basic_string<wchar_t, std::char_traits<wchar_t>,
                              StackAllocator<wchar_t, stack_capacity> >,
            stack_capacity>() {}

 private:
  // DISALLOW_EVIL_CONSTRUCTORS(StackWString);
  StackWString(const StackWString &);
  void operator=(const StackWString &);
};
// StackVector
//
// Example:
// StackVector<int, 16> foo;
// foo->push_back(22); // we have overloaded operator->
// foo[0] = 10; // as well as operator[]
// std::vector variant whose first `stack_capacity` elements live on the
// stack buffer.
//
// Example:
//   StackVector<int, 16> foo;
//   foo->push_back(22); // we have overloaded operator->
//   foo[0] = 10;        // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                            stack_capacity> {
 public:
  StackVector()
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {}
  // We need to put this in STL containers sometimes, which requires a copy
  // constructor. We can't call the regular copy constructor because that will
  // take the stack buffer from the original. Here, we create an empty object
  // and make a stack buffer of its own.
  StackVector(const StackVector<T, stack_capacity> &other)
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {
    this->container().assign(other->begin(), other->end());
  }
  // Element-wise copy into this vector's own (stack or heap) storage.
  StackVector<T, stack_capacity> &
  operator=(const StackVector<T, stack_capacity> &other) {
    this->container().assign(other->begin(), other->end());
    return *this;
  }
  // Vectors are commonly indexed, which isn't very convenient even with
  // operator-> (using "->at()" does exception stuff we don't want).
  T &operator[](size_t i) { return this->container().operator[](i); }
  const T &operator[](size_t i) const {
    return this->container().operator[](i);
  }
};
namespace {
// Minimal 3-component float vector used by the BVH builder and traverser.
struct float3 {
  float3() {}
  float3(float xx, float yy, float zz) {
    x = xx;
    y = yy;
    z = zz;
  }
  // Construct from a raw float[3].
  float3(const float *p) {
    x = p[0];
    y = p[1];
    z = p[2];
  }
  // Component-wise arithmetic.
  float3 operator*(float f) const { return float3(x * f, y * f, z * f); }
  float3 operator-(const float3 &f2) const {
    return float3(x - f2.x, y - f2.y, z - f2.z);
  }
  float3 operator*(const float3 &f2) const {
    return float3(x * f2.x, y * f2.y, z * f2.z);
  }
  float3 operator+(const float3 &f2) const {
    return float3(x + f2.x, y + f2.y, z + f2.z);
  }
  float3 &operator+=(const float3 &f2) {
    x += f2.x;
    y += f2.y;
    z += f2.z;
    return (*this);
  }
  float3 operator/(const float3 &f2) const {
    return float3(x / f2.x, y / f2.y, z / f2.z);
  }
  // Index access: relies on x, y, z being laid out contiguously
  // (NOTE(review): not formally guaranteed by the standard, but holds on
  // common compilers and is also assumed by as_float3 below).
  float operator[](int i) const { return (&x)[i]; }
  float &operator[](int i) { return (&x)[i]; }
  float3 neg() { return float3(-x, -y, -z); }
  float length() { return sqrtf(x * x + y * y + z * z); }
  // Scale to unit length; leaves the vector unchanged when its length is
  // (nearly) zero to avoid division blow-up.
  void normalize() {
    float len = length();
    if (fabs(len) > 1.0e-6f) {
      float inv_len = 1.0f / len;
      x *= inv_len;
      y *= inv_len;
      z *= inv_len;
    }
  }
  float x, y, z;
  // float pad;  // for alignment
};
// View a raw float[3] pointer as a float3 (cast only, no copy — assumes
// float3 is layout-compatible with three consecutive floats).
const float3* as_float3(const float* v) { return (const float3*)v; }
// Scalar * vector: mirrors float3::operator*(float) for a left-hand scalar.
inline float3 operator*(float f, const float3 &v) {
  return v * f;
}
// Cross product a x b.
inline float3 vcross(const float3& a, const float3& b) {
  return float3(a[1] * b[2] - a[2] * b[1],
                a[2] * b[0] - a[0] * b[2],
                a[0] * b[1] - a[1] * b[0]);
}
// Dot product of a and b (accumulated in the same x, y, z order as the
// original expression).
inline float vdot(const float3& a, const float3& b) {
  float sum = 0.0f;
  for (int i = 0; i < 3; i++) {
    sum += a[i] * b[i];
  }
  return sum;
}
} // namespace
///< Single ray/triangle intersection record.
struct Intersection {
  float t = 0.f;  // hit distance along the ray
  float u = 0.f;  // barycentric u at the hit point
  float v = 0.f;  // barycentric v at the hit point
  // Index of the hit triangle. Fixed: was initialized with the float
  // literal `0.f`, an implicit float -> unsigned int conversion.
  unsigned int faceID = 0;
  Intersection() {}
  Intersection(float _t, float _u, float _v, unsigned int i)
      : t(_t), u(_u), v(_v), faceID(i) {}
};
// Input ray. Callers fill org/dir; invDir and dirSign are filled
// internally (presumably by the traversal code — not visible here).
typedef struct {
  float org[3];    // must set: ray origin
  float dir[3];    // must set: ray direction
  float invDir[3]; // filled internally
  int dirSign[3];  // filled internally
} Ray;
// One node of the flattened BVH (stored in a linear std::vector; see
// BVHAccel::nodes_). `data` is interpreted per the comment below depending
// on the `flag` value.
class BVHNode {
public:
  BVHNode(){};
  ~BVHNode(){};
  float bmin[3];  // AABB minimum corner
  float bmax[3];  // AABB maximum corner
  int flag; // 1 = leaf node, 0 = branch node
  int axis; // split axis — presumably set by the builder; confirm in BuildTree
  // leaf
  //   data[0] = npoints
  //   data[1] = index
  //
  // branch
  //   data[0] = child[0]
  //   data[1] = child[1]
  unsigned int data[2];
};
namespace {
// Orders intersections by hit distance t. Used with std::priority_queue
// (see IsectVector below), where this "less-than" ordering keeps the
// FURTHEST intersection on top.
class IsectComparator {
public:
  bool operator()(const Intersection &a, const Intersection &b) const {
    return a.t < b.t;
  }
};
// Stores furthest intersection at top
typedef std::priority_queue<Intersection, std::vector<Intersection>,
IsectComparator> IsectVector;
// Small 4x4 matrix helper (T = float or double). Vectors are treated as
// ROW vectors: MultV computes dst = v * m with an implicit w = 1.
template <typename T> class Matrix {
public:
  // Print the matrix, one row per line.
  void Print(T m[4][4]) {
    for (int i = 0; i < 4; i++) {
      printf("m[%d] = %f, %f, %f, %f\n", i, m[i][0], m[i][1], m[i][2], m[i][3]);
    }
  }
  // Load the identity matrix into m.
  void Identity(T m[4][4]) {
    m[0][0] = 1.0; m[0][1] = 0.0; m[0][2] = 0.0; m[0][3] = 0.0;
    m[1][0] = 0.0; m[1][1] = 1.0; m[1][2] = 0.0; m[1][3] = 0.0;
    m[2][0] = 0.0; m[2][1] = 0.0; m[2][2] = 1.0; m[2][3] = 0.0;
    m[3][0] = 0.0; m[3][1] = 0.0; m[3][2] = 0.0; m[3][3] = 1.0;
  }
  // Invert m in place.
  // Cramer's-rule formulation taken from Intel's "Streaming SIMD
  // Extensions - Inverse of 4x4 Matrix" application note (AP-928).
  // NOTE(review): no singularity check — a zero determinant divides by zero.
  void Inverse(T m[4][4]) {
    int i, j;
    T tmp[12]; /* tmp array for pairs */
    T tsrc[16]; /* array of transpose source matrix */
    T det; /* determinant */
    /* transpose matrix */
    for (i = 0; i < 4; i++) {
      tsrc[i] = m[i][0];
      tsrc[i + 4] = m[i][1];
      tsrc[i + 8] = m[i][2];
      tsrc[i + 12] = m[i][3];
    }
    /* calculate pair for first 8 elements(cofactors) */
    tmp[0] = tsrc[10] * tsrc[15];
    tmp[1] = tsrc[11] * tsrc[14];
    tmp[2] = tsrc[9] * tsrc[15];
    tmp[3] = tsrc[11] * tsrc[13];
    tmp[4] = tsrc[9] * tsrc[14];
    tmp[5] = tsrc[10] * tsrc[13];
    tmp[6] = tsrc[8] * tsrc[15];
    tmp[7] = tsrc[11] * tsrc[12];
    tmp[8] = tsrc[8] * tsrc[14];
    tmp[9] = tsrc[10] * tsrc[12];
    tmp[10] = tsrc[8] * tsrc[13];
    tmp[11] = tsrc[9] * tsrc[12];
    /* calculate first 8 elements(cofactors) */
    m[0][0] = tmp[0] * tsrc[5] + tmp[3] * tsrc[6] + tmp[4] * tsrc[7];
    m[0][0] -= tmp[1] * tsrc[5] + tmp[2] * tsrc[6] + tmp[5] * tsrc[7];
    m[0][1] = tmp[1] * tsrc[4] + tmp[6] * tsrc[6] + tmp[9] * tsrc[7];
    m[0][1] -= tmp[0] * tsrc[4] + tmp[7] * tsrc[6] + tmp[8] * tsrc[7];
    m[0][2] = tmp[2] * tsrc[4] + tmp[7] * tsrc[5] + tmp[10] * tsrc[7];
    m[0][2] -= tmp[3] * tsrc[4] + tmp[6] * tsrc[5] + tmp[11] * tsrc[7];
    m[0][3] = tmp[5] * tsrc[4] + tmp[8] * tsrc[5] + tmp[11] * tsrc[6];
    m[0][3] -= tmp[4] * tsrc[4] + tmp[9] * tsrc[5] + tmp[10] * tsrc[6];
    m[1][0] = tmp[1] * tsrc[1] + tmp[2] * tsrc[2] + tmp[5] * tsrc[3];
    m[1][0] -= tmp[0] * tsrc[1] + tmp[3] * tsrc[2] + tmp[4] * tsrc[3];
    m[1][1] = tmp[0] * tsrc[0] + tmp[7] * tsrc[2] + tmp[8] * tsrc[3];
    m[1][1] -= tmp[1] * tsrc[0] + tmp[6] * tsrc[2] + tmp[9] * tsrc[3];
    m[1][2] = tmp[3] * tsrc[0] + tmp[6] * tsrc[1] + tmp[11] * tsrc[3];
    m[1][2] -= tmp[2] * tsrc[0] + tmp[7] * tsrc[1] + tmp[10] * tsrc[3];
    m[1][3] = tmp[4] * tsrc[0] + tmp[9] * tsrc[1] + tmp[10] * tsrc[2];
    m[1][3] -= tmp[5] * tsrc[0] + tmp[8] * tsrc[1] + tmp[11] * tsrc[2];
    /* calculate pairs for second 8 elements(cofactors) */
    tmp[0] = tsrc[2] * tsrc[7];
    tmp[1] = tsrc[3] * tsrc[6];
    tmp[2] = tsrc[1] * tsrc[7];
    tmp[3] = tsrc[3] * tsrc[5];
    tmp[4] = tsrc[1] * tsrc[6];
    tmp[5] = tsrc[2] * tsrc[5];
    tmp[6] = tsrc[0] * tsrc[7];
    tmp[7] = tsrc[3] * tsrc[4];
    tmp[8] = tsrc[0] * tsrc[6];
    tmp[9] = tsrc[2] * tsrc[4];
    tmp[10] = tsrc[0] * tsrc[5];
    tmp[11] = tsrc[1] * tsrc[4];
    /* calculate second 8 elements(cofactors) */
    m[2][0] = tmp[0] * tsrc[13] + tmp[3] * tsrc[14] + tmp[4] * tsrc[15];
    m[2][0] -= tmp[1] * tsrc[13] + tmp[2] * tsrc[14] + tmp[5] * tsrc[15];
    m[2][1] = tmp[1] * tsrc[12] + tmp[6] * tsrc[14] + tmp[9] * tsrc[15];
    m[2][1] -= tmp[0] * tsrc[12] + tmp[7] * tsrc[14] + tmp[8] * tsrc[15];
    m[2][2] = tmp[2] * tsrc[12] + tmp[7] * tsrc[13] + tmp[10] * tsrc[15];
    m[2][2] -= tmp[3] * tsrc[12] + tmp[6] * tsrc[13] + tmp[11] * tsrc[15];
    m[2][3] = tmp[5] * tsrc[12] + tmp[8] * tsrc[13] + tmp[11] * tsrc[14];
    m[2][3] -= tmp[4] * tsrc[12] + tmp[9] * tsrc[13] + tmp[10] * tsrc[14];
    m[3][0] = tmp[2] * tsrc[10] + tmp[5] * tsrc[11] + tmp[1] * tsrc[9];
    m[3][0] -= tmp[4] * tsrc[11] + tmp[0] * tsrc[9] + tmp[3] * tsrc[10];
    m[3][1] = tmp[8] * tsrc[11] + tmp[0] * tsrc[8] + tmp[7] * tsrc[10];
    m[3][1] -= tmp[6] * tsrc[10] + tmp[9] * tsrc[11] + tmp[1] * tsrc[8];
    m[3][2] = tmp[6] * tsrc[9] + tmp[11] * tsrc[11] + tmp[3] * tsrc[8];
    m[3][2] -= tmp[10] * tsrc[11] + tmp[2] * tsrc[8] + tmp[7] * tsrc[9];
    m[3][3] = tmp[10] * tsrc[10] + tmp[4] * tsrc[8] + tmp[9] * tsrc[9];
    /* FIXED: the second term read `tmp[11] * tsrc[0]`; the cofactor pattern
     * of the AP-928 source requires tsrc[10] here. */
    m[3][3] -= tmp[8] * tsrc[9] + tmp[11] * tsrc[10] + tmp[5] * tsrc[8];
    /* calculate determinant */
    det = tsrc[0] * m[0][0] + tsrc[1] * m[0][1] + tsrc[2] * m[0][2] +
          tsrc[3] * m[0][3];
    /* calculate matrix inverse */
    det = 1.0 / det;
    for (j = 0; j < 4; j++) {
      for (i = 0; i < 4; i++) {
        m[j][i] *= det;
      }
    }
  }
  // Transpose m in place.
  void Transpose(T m[4][4]) {
    T t[4][4];
    // Transpose
    for (int j = 0; j < 4; j++) {
      for (int i = 0; i < 4; i++) {
        t[j][i] = m[i][j];
      }
    }
    // Copy
    for (int j = 0; j < 4; j++) {
      for (int i = 0; i < 4; i++) {
        m[j][i] = t[j][i];
      }
    }
  }
  // Matrix product: dst[i][j] = sum_k m1[i][k] * m0[k][j], i.e. dst = m1 * m0
  // in conventional row-major index notation.
  void Mult(T dst[4][4], const T m0[4][4], const T m1[4][4]) {
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        dst[i][j] = 0;
        for (int k = 0; k < 4; ++k) {
          dst[i][j] += m0[k][j] * m1[i][k];
        }
      }
    }
  }
  // Transform point v (row vector, implicit w = 1): dst = v * m.
  // Safe when dst aliases v (result staged in tmp).
  void MultV(T dst[3], const T m[4][4], const T v[3]) {
    T tmp[3];
    tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0];
    tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1];
    tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2];
    dst[0] = tmp[0];
    dst[1] = tmp[1];
    dst[2] = tmp[2];
  }
  // float3 overload of the point transform above.
  void MultV(float3 &dst, const T m[4][4], const float3 &v) {
    T tmp[3];
    tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0];
    tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1];
    tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2];
    dst[0] = tmp[0];
    dst[1] = tmp[1];
    dst[2] = tmp[2];
  }
};
}
///< BVH build option.
///< BVH build option.
struct BVHBuildOptions {
  // Relative cost of an AABB test in the SAH; triangle-intersection cost is
  // taken as (1 - costTaabb), see FindCutFromBinBuffer.
  float costTaabb;
  int minLeafPrimitives;  // stop splitting below this many primitives
  int maxTreeDepth;       // hard recursion limit
  int binSize;            // number of SAH bins per axis
  int shallowDepth;       // depth of the shallow (parallel-build) top tree
  size_t minPrimitivesForParallelBuild;  // threshold to enable parallel build
  // Cache bounding box computation.
  // Requires more memory, but BVHbuild can be faster.
  bool cacheBBox;
  // Set default value: Taabb = 0.2
  BVHBuildOptions()
      : costTaabb(0.2f), minLeafPrimitives(4), maxTreeDepth(256), binSize(64),
        shallowDepth(3), minPrimitivesForParallelBuild(1024 * 128),
        cacheBBox(false) {}
};
///< BVH build statistics.
///< BVH build statistics, filled during Build() and queried via
///< BVHAccel::GetStatistics().
class BVHBuildStatistics {
public:
  int maxTreeDepth;    // deepest node reached
  int numLeafNodes;
  int numBranchNodes;
  float epsScale;      // epsilon scale factor used during the build
  double buildSecs;    // wall-clock build time in seconds
  // Default-construct with empty statistics.
  BVHBuildStatistics()
      : maxTreeDepth(0), numLeafNodes(0), numBranchNodes(0), epsScale(1.0f),
        buildSecs(0.0) {}
};
///< BVH trace option.
///< BVH trace option.
class BVHTraceOptions {
public:
  // Hit only for face IDs in indexRange.
  // This feature is good to mimic something like glDrawArrays()
  unsigned int faceIdsRange[2];
  // Default: accept every face ID.
  BVHTraceOptions() {
    faceIdsRange[0] = 0;
    faceIdsRange[1] = 0x7FFFFFFF; // Up to 2G face IDs.
  }
};
// Axis-aligned bounding box, default-initialized to the "empty" box
// (min = +FLT_MAX, max = -FLT_MAX) so that merging any point expands it.
class BBox {
public:
  float bmin[3];
  float bmax[3];
  BBox() {
    const float kInf = std::numeric_limits<float>::max();
    for (int axis = 0; axis < 3; axis++) {
      bmin[axis] = kInf;
      bmax[axis] = -kInf;
    }
  }
};
// BVH acceleration structure over an indexed triangle mesh
// (vertices: float[3] array, faces: 3 indices per triangle).
class BVHAccel {
public:
  BVHAccel() : epsScale_(1.0f){};
  ~BVHAccel(){};
  ///< Build BVH for input mesh.
  bool Build(const float *vertices, const unsigned int *faces,
             const unsigned int numFaces, const BVHBuildOptions &options);
  ///< Get statistics of built BVH tree. Valid after Build()
  BVHBuildStatistics GetStatistics() const { return stats_; }
  ///< Dump built BVH to the file.
  bool Dump(const char *filename);
  /// Load BVH binary
  bool Load(const char *filename);
  ///< Traverse into BVH along ray and find closest hit point if found
  bool Traverse(Intersection &isect, const float *vertices,
                const unsigned int *faces, const Ray &ray,
                const BVHTraceOptions &options);
  ///< Multi-hit ray traversal.
  ///< Returns `maxIntersections` frontmost intersections
  bool MultiHitTraverse(StackVector<Intersection, 128> &isects,
                        int maxIntersections, const float *vertices,
                        const unsigned int *faces, Ray &ray);
  // Accessors for the flattened node array and the triangle index remap.
  const std::vector<BVHNode> &GetNodes() const { return nodes_; }
  const std::vector<unsigned int> &GetIndices() const { return indices_; }
  // Root AABB of the tree; the "empty" box (+max/-max) when no tree is built.
  void BoundingBox(float bmin[3], float bmax[3]) const {
    if (nodes_.empty()) {
      bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
      bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
    } else {
      bmin[0] = nodes_[0].bmin[0];
      bmin[1] = nodes_[0].bmin[1];
      bmin[2] = nodes_[0].bmin[2];
      bmax[0] = nodes_[0].bmax[0];
      bmax[1] = nodes_[0].bmax[1];
      bmax[2] = nodes_[0].bmax[2];
    }
  }

private:
#if NANORT_ENABLE_PARALLEL_BUILD
  typedef struct {
    unsigned int leftIdx;
    unsigned int rightIdx;
    unsigned int offset;
  } ShallowNodeInfo;
  // Used only during BVH construction
  std::vector<ShallowNodeInfo> shallowNodeInfos_;
  ///< Builds shallow BVH tree recursively.
  unsigned int BuildShallowTree(std::vector<BVHNode> &outNodes,
                                const float *vertices,
                                const unsigned int *faces, unsigned int leftIdx,
                                unsigned int rightIdx, int depth,
                                int maxShallowDepth, float epsScale);
#endif
  ///< Builds BVH tree recursively.
  size_t BuildTree(BVHBuildStatistics &outStat, std::vector<BVHNode> &outNodes,
                   const float *vertices, const unsigned int *faces,
                   unsigned int leftIdx, unsigned int rightIdx, int depth,
                   float epsScale);
  BVHBuildOptions options_;
  std::vector<BVHNode> nodes_;
  std::vector<unsigned int> indices_; // max 4G triangles.
  BVHBuildStatistics stats_;
  float epsScale_;
  std::vector<BBox> bboxes_;
};
#if 0
class BVHBox
{
}
class Scene
{
std::vector<BVHBox> nodes_;
};
#endif
} // namespace nanort
#ifdef NANORT_IMPLEMENTATION
#include <limits>
#include <cassert>
#include <algorithm>
#include <functional>
//
// SAH functions
//
namespace nanort {
// Histogram storage for SAH binning: a (min, max) counter pair for each of
// the three axes, with `binSize` bins per axis.
struct BinBuffer {
  BinBuffer(int size) {
    binSize = size;
    bin.resize(2 * 3 * size);
    clear();
  }
  // Reset every counter to zero.
  void clear() { std::fill(bin.begin(), bin.end(), static_cast<size_t>(0)); }
  std::vector<size_t> bin; // layout: (min, max) * xyz * binSize
  int binSize;
};
inline float CalculateSurfaceArea(const float3 &min, const float3 &max) {
float3 box = max - min;
return 2.0f * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}
// Compute the AABB [bmin, bmax] of triangle `index` of the indexed mesh
// (faces holds three vertex indices per triangle; vertices holds xyz
// positions).
inline void GetBoundingBoxOfTriangle(float3 &bmin, float3 &bmax,
                                     const float *vertices,
                                     const unsigned int *faces,
                                     unsigned int index) {
  // Gather the three corner positions of the triangle.
  float3 corner[3];
  for (int v = 0; v < 3; v++) {
    unsigned int vidx = faces[3 * index + v];
    corner[v] = float3(&vertices[3 * vidx]);
  }
  // Component-wise min/max over the corners.
  bmin = corner[0];
  bmax = corner[0];
  for (int v = 1; v < 3; v++) {
    for (int axis = 0; axis < 3; axis++) {
      bmin[axis] = std::min(bmin[axis], corner[v][axis]);
      bmax[axis] = std::max(bmax[axis], corner[v][axis]);
    }
  }
}
// Accumulate the triangles indices[leftIdx, rightIdx) into the SAH bin
// histogram: for each axis, the bin containing the triangle's AABB min gets
// a "min" count and the bin containing its AABB max gets a "max" count.
void ContributeBinBuffer(BinBuffer *bins, // [out]
                         const float3 &sceneMin, const float3 &sceneMax,
                         const float *vertices, const unsigned int *faces,
                         unsigned int *indices, unsigned int leftIdx,
                         unsigned int rightIdx, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
  float binSize = (float)bins->binSize;
  // Calculate extent
  float3 sceneSize, sceneInvSize;
  sceneSize = sceneMax - sceneMin;
  for (int i = 0; i < 3; ++i) {
    assert(sceneSize[i] >= 0.0);
    if (sceneSize[i] > kEPS) {
      // Scale so that positions map directly to bin indices.
      sceneInvSize[i] = binSize / sceneSize[i];
    } else {
      // Degenerate axis: every triangle falls into bin 0.
      sceneInvSize[i] = 0.0;
    }
  }
  // Clear bin data
  std::fill(bins->bin.begin(), bins->bin.end(), 0);
  // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->binSize));
  size_t idxBMin[3];
  size_t idxBMax[3];
  for (size_t i = leftIdx; i < rightIdx; i++) {
    //
    // Quantize the position into [0, BIN_SIZE)
    //
    // q[i] = (int)(p[i] - scene_bmin) / scene_size
    //
    float3 bmin;
    float3 bmax;
    GetBoundingBoxOfTriangle(bmin, bmax, vertices, faces, indices[i]);
    float3 quantizedBMin = (bmin - sceneMin) * sceneInvSize;
    float3 quantizedBMax = (bmax - sceneMin) * sceneInvSize;
    // idx is now in [0, BIN_SIZE)
    for (int j = 0; j < 3; ++j) {
      // Clamp below at 0 (positions slightly outside the scene box).
      int q0 = (int)quantizedBMin[j];
      if (q0 < 0)
        q0 = 0;
      int q1 = (int)quantizedBMax[j];
      if (q1 < 0)
        q1 = 0;
      idxBMin[j] = (unsigned int)q0;
      idxBMax[j] = (unsigned int)q1;
      // Clamp above at binSize-1.
      // NOTE(review): these comparisons (and the asserts below) compare a
      // size_t against the float `binSize`; correct for realistic bin
      // counts, but an integer copy of binSize would be cleaner.
      if (idxBMin[j] >= binSize)
        idxBMin[j] = (size_t)binSize - 1;
      if (idxBMax[j] >= binSize)
        idxBMax[j] = (size_t)binSize - 1;
      assert(idxBMin[j] < binSize);
      assert(idxBMax[j] < binSize);
      // Increment bin counter
      bins->bin[0 * (bins->binSize * 3) + j * bins->binSize + idxBMin[j]] += 1;
      bins->bin[1 * (bins->binSize * 3) + j * bins->binSize + idxBMax[j]] += 1;
    }
  }
}
inline float SAH(size_t ns1, float leftArea, size_t ns2, float rightArea,
                 float invS, float Taabb, float Ttri) {
  // Surface Area Heuristic cost of a split: two AABB tests plus the
  // expected triangle-test work on each side, weighted by each side's
  // surface area relative to the parent (invS = 1 / parent area).
  float leftCost = (leftArea * invS) * (float)(ns1)*Ttri;
  float rightCost = (rightArea * invS) * (float)(ns2)*Ttri;
  return 2.0f * Taabb + leftCost + rightCost;
}
// Scan the per-axis bin histograms to find the SAH-optimal split position
// on each axis (written to cutPos[0..2]) and the cheapest axis overall
// (minCostAxis). `costTaabb` weights box tests against triangle tests.
// Always returns true.
bool FindCutFromBinBuffer(float *cutPos, // [out] xyz
                          int &minCostAxis, // [out]
                          const BinBuffer *bins, const float3 &bmin,
                          const float3 &bmax, size_t numTriangles,
                          float costTaabb, // should be in [0.0, 1.0]
                          float epsScale) {
  const float eps = std::numeric_limits<float>::epsilon() * epsScale;
  size_t left, right;
  float3 bsize, bstep;
  float3 bminLeft, bmaxLeft;
  float3 bminRight, bmaxRight;
  float saLeft, saRight, saTotal;
  float pos;
  float minCost[3];
  float costTtri = 1.0f - costTaabb;
  minCostAxis = 0;
  bsize = bmax - bmin;
  bstep = bsize * (1.0f / bins->binSize); // world-space width of one bin
  saTotal = CalculateSurfaceArea(bmin, bmax);
  float invSaTotal = 0.0f;
  if (saTotal > eps) { // guard against a degenerate (zero-area) parent box
    invSaTotal = 1.0f / saTotal;
  }
  for (int j = 0; j < 3; ++j) {
    //
    // Compute SAH cost for the right side of each cell of the bbox.
    // Exclude both extreme sides of the bbox.
    //
    //  i:      0    1    2    3
    //     +----+----+----+----+----+
    //     |    |    |    |    |    |
    //     +----+----+----+----+----+
    //
    float minCostPos = bmin[j] + 0.5f * bstep[j];
    minCost[j] = std::numeric_limits<float>::max();
    left = 0;
    right = numTriangles;
    bminLeft = bminRight = bmin;
    bmaxLeft = bmaxRight = bmax;
    for (int i = 0; i < bins->binSize - 1; ++i) {
      // Running totals: `left` accumulates the min-position histogram,
      // `right` sheds the max-position histogram, bin by bin.
      left += bins->bin[0 * (3 * bins->binSize) + j * bins->binSize + i];
      right -= bins->bin[1 * (3 * bins->binSize) + j * bins->binSize + i];
      assert(left <= numTriangles);
      assert(right <= numTriangles);
      //
      // Candidate split plane at the center of bin i:
      //   pos = bmin + (i + 0.5) * (bsize / BIN_SIZE)
      //
      pos = bmin[j] + (i + 0.5f) * bstep[j];
      bmaxLeft[j] = pos;
      bminRight[j] = pos;
      saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
      saRight = CalculateSurfaceArea(bminRight, bmaxRight);
      float cost =
          SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
      if (cost < minCost[j]) {
        // Update the min cost for this axis.
        minCost[j] = cost;
        minCostPos = pos;
      }
    }
    cutPos[j] = minCostPos;
  }
  // Find the axis with the smallest SAH cost.
  float cost = minCost[0];
  minCostAxis = 0;
  if (cost > minCost[1]) {
    minCostAxis = 1;
    cost = minCost[1];
  }
  if (cost > minCost[2]) {
    minCostAxis = 2;
    cost = minCost[2];
  }
  return true;
}
// Predicate for std::partition: true when triangle i's centroid lies below
// `pos` along `axis`. Compares the un-normalized vertex-coordinate sum
// against 3 * pos to avoid a division per triangle.
class SAHPred : public std::unary_function<unsigned int, bool> {
public:
  SAHPred(int axis, float pos, const float *vertices, const unsigned int *faces)
      : axis_(axis), pos_(pos), vertices_(vertices), faces_(faces) {}

  bool operator()(unsigned int i) const {
    const unsigned int *f = &faces_[3 * i];
    float sum = vertices_[3 * f[0] + axis_] + vertices_[3 * f[1] + axis_] +
                vertices_[3 * f[2] + axis_];
    return sum < pos_ * 3.0f;
  }

private:
  int axis_;
  float pos_;
  const float *vertices_;
  const unsigned int *faces_;
};
#ifdef _OPENMP
// OpenMP AABB of faces[indices[leftIndex..rightIndex)], padded by kEPS on
// every side. Each thread reduces into a firstprivate local bound; the
// per-thread results are merged in a critical section. Falls back to a
// single thread for small ranges (the `if` clause).
void ComputeBoundingBoxOMP(float3 &bmin, float3 &bmax, const float *vertices,
                           const unsigned int *faces, unsigned int *indices,
                           unsigned int leftIndex, unsigned int rightIndex,
                           float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  long long i = leftIndex;
  long long idx = indices[i];
  long long n = rightIndex - leftIndex;

  // Seed the global bounds with the first vertex of the first face.
  bmin[0] = vertices[3 * faces[3 * idx + 0] + 0] - kEPS;
  bmin[1] = vertices[3 * faces[3 * idx + 0] + 1] - kEPS;
  bmin[2] = vertices[3 * faces[3 * idx + 0] + 2] - kEPS;
  bmax[0] = vertices[3 * faces[3 * idx + 0] + 0] + kEPS;
  bmax[1] = vertices[3 * faces[3 * idx + 0] + 1] + kEPS;
  bmax[2] = vertices[3 * faces[3 * idx + 0] + 2] + kEPS;

  float local_bmin[3] = {bmin[0], bmin[1], bmin[2]};
  float local_bmax[3] = {bmax[0], bmax[1], bmax[2]};

#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp for
    for (i = leftIndex; i < rightIndex; i++) { // for each face
      size_t idx = indices[i];
      for (int j = 0; j < 3; j++) { // for each face vertex
        size_t fid = faces[3 * idx + j];
        for (int k = 0; k < 3; k++) { // xyz
          float minval = vertices[3 * fid + k] - kEPS;
          float maxval = vertices[3 * fid + k] + kEPS;
          if (local_bmin[k] > minval)
            local_bmin[k] = minval;
          if (local_bmax[k] < maxval)
            local_bmax[k] = maxval;
        }
      }
    }

#pragma omp critical
    {
      // Merge this thread's bounds into the shared result. (Fix: the
      // original repeated each comparison in a redundant nested `if`;
      // one test per component inside the critical section suffices.)
      for (int k = 0; k < 3; k++) {
        if (local_bmin[k] < bmin[k])
          bmin[k] = local_bmin[k];
        if (local_bmax[k] > bmax[k])
          bmax[k] = local_bmax[k];
      }
    }
  }
}
#endif
// Serial AABB of faces[indices[leftIndex..rightIndex)], padded by kEPS on
// every side (epsilon scaled by scene size via epsScale).
void ComputeBoundingBox(float3 &bmin, float3 &bmax, const float *vertices,
                        const unsigned int *faces, unsigned int *indices,
                        unsigned int leftIndex, unsigned int rightIndex,
                        float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  // Seed with the first vertex of the first face in the range.
  long long first = indices[leftIndex];
  for (int k = 0; k < 3; k++) {
    bmin[k] = vertices[3 * faces[3 * first + 0] + k] - kEPS;
    bmax[k] = vertices[3 * faces[3 * first + 0] + k] + kEPS;
  }

  // Grow over every vertex of every face in the range.
  for (long long i = leftIndex; i < rightIndex; i++) {
    size_t idx = indices[i];
    for (int j = 0; j < 3; j++) {
      size_t fid = faces[3 * idx + j];
      for (int k = 0; k < 3; k++) {
        float lo = vertices[3 * fid + k] - kEPS;
        float hi = vertices[3 * fid + k] + kEPS;
        if (lo < bmin[k])
          bmin[k] = lo;
        if (hi > bmax[k])
          bmax[k] = hi;
      }
    }
  }
}
// AABB over the cached per-face boxes bboxes[indices[leftIndex..rightIndex)],
// padded by kEPS on every side.
void GetBoundingBox(float3 &bmin, float3 &bmax, std::vector<BBox> &bboxes,
                    unsigned int *indices, unsigned int leftIndex,
                    unsigned int rightIndex, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;

  // Seed with the first cached box in the range.
  long long first = indices[leftIndex];
  for (int k = 0; k < 3; k++) {
    bmin[k] = bboxes[first].bmin[k] - kEPS;
    bmax[k] = bboxes[first].bmax[k] + kEPS;
  }

  // Grow over the remaining cached boxes.
  for (long long i = leftIndex; i < rightIndex; i++) {
    size_t idx = indices[i];
    for (int k = 0; k < 3; k++) {
      float lo = bboxes[idx].bmin[k] - kEPS;
      float hi = bboxes[idx].bmax[k] + kEPS;
      if (lo < bmin[k])
        bmin[k] = lo;
      if (hi > bmax[k])
        bmax[k] = hi;
    }
  }
}
//
// --
//
#if NANORT_ENABLE_PARALLEL_BUILD
// Build the top (shallow) part of the BVH over indices_[leftIdx, rightIdx).
// Subtrees deeper than `maxShallowDepth` are not expanded here: a dummy node
// is appended and the range is queued in shallowNodeInfos_ so those subtrees
// can be built in parallel afterwards. Returns the index in `outNodes` of
// the node created for this range.
unsigned int BVHAccel::BuildShallowTree(std::vector<BVHNode> &outNodes,
                                        const float *vertices,
                                        const unsigned int *faces,
                                        unsigned int leftIdx,
                                        unsigned int rightIdx, int depth,
                                        int maxShallowDepth, float epsScale) {
  assert(leftIdx <= rightIdx);

  unsigned int offset = outNodes.size(); // index this node will occupy

  if (stats_.maxTreeDepth < depth) {
    stats_.maxTreeDepth = depth;
  }

  float3 bmin, bmax;
  ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
                     rightIdx, epsScale);

  long long n = rightIdx - leftIdx;
  if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
    // Create leaf node.
    BVHNode leaf;
    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];
    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];
    assert(leftIdx < std::numeric_limits<unsigned int>::max());
    leaf.flag = 1;                        // leaf
    leaf.data[0] = n;                     // primitive count
    leaf.data[1] = (unsigned int)leftIdx; // first slot in indices_
    outNodes.push_back(leaf); // atomic update
    stats_.numLeafNodes++;
    return offset;
  }

  //
  // Create branch node.
  //
  if (depth >= maxShallowDepth) {
    // Delay building this subtree: remember the range and emit a placeholder
    // node; Build() overwrites it after the parallel phase.
    ShallowNodeInfo info;
    info.leftIdx = leftIdx;
    info.rightIdx = rightIdx;
    info.offset = offset;
    shallowNodeInfos_.push_back(info);
    // Add dummy node.
    BVHNode node;
    node.axis = -1;
    node.flag = -1;
    outNodes.push_back(node);
    return offset;
  } else {
    //
    // Compute SAH and find best split axis and position.
    //
    int minCutAxis = 0;
    float cutPos[3] = {0.0, 0.0, 0.0};
    BinBuffer bins(options_.binSize);
    ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
                        leftIdx, rightIdx, epsScale);
    FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
                         options_.costTaabb, epsScale);

    // NOTE(review): this loop runs exactly one iteration, so only minCutAxis
    // is actually tried; a failed cut falls back to the object median below.
    unsigned int midIdx;
    int cutAxis = minCutAxis;
    for (int axisTry = 0; axisTry < 1; axisTry++) {
      unsigned int *begin = &indices_[leftIdx];
      unsigned int *end = &indices_[rightIdx - 1] + 1; // mimics end() iterator.
      unsigned int *mid = 0;
      // try minCutAxis first.
      cutAxis = (minCutAxis + axisTry) % 3;
      //
      // Split at (cutAxis, cutPos); indices_ is reordered in place.
      //
      mid = std::partition(begin, end,
                           SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));
      midIdx = leftIdx + (mid - begin);
      if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
        // Can't split well: fall back to the object median (may give an
        // unoptimized but stable tree).
        midIdx = leftIdx + (n >> 1);
        // Try another axis if there's axis to try.
      } else {
        // Found good cut. exit loop.
        break;
      }
    }

    BVHNode node;
    node.axis = cutAxis;
    node.flag = 0; // 0 = branch
    outNodes.push_back(node);

    unsigned int leftChildIndex = 0;
    unsigned int rightChildIndex = 0;
    leftChildIndex =
        BuildShallowTree(outNodes, vertices, faces, leftIdx, midIdx, depth + 1,
                         maxShallowDepth, epsScale);
    rightChildIndex =
        BuildShallowTree(outNodes, vertices, faces, midIdx, rightIdx, depth + 1,
                         maxShallowDepth, epsScale);

    // Patch the branch node now that both child offsets are known.
    if ((leftChildIndex != (unsigned int)(-1)) &&
        (rightChildIndex != (unsigned int)(-1))) {
      outNodes[offset].data[0] = leftChildIndex;
      outNodes[offset].data[1] = rightChildIndex;
      outNodes[offset].bmin[0] = bmin[0];
      outNodes[offset].bmin[1] = bmin[1];
      outNodes[offset].bmin[2] = bmin[2];
      outNodes[offset].bmax[0] = bmax[0];
      outNodes[offset].bmax[1] = bmax[1];
      outNodes[offset].bmax[2] = bmax[2];
    } else {
      // A child failed to build on only one side; treated as fatal.
      if ((leftChildIndex == (unsigned int)(-1)) &&
          (rightChildIndex != (unsigned int)(-1))) {
        fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
        exit(-1);
      } else if ((leftChildIndex != (unsigned int)(-1)) &&
                 (rightChildIndex == (unsigned int)(-1))) {
        fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
        exit(-1);
      }
    }
  }

  stats_.numBranchNodes++;
  return offset;
}
#endif
// Recursively build a BVH over indices_[leftIdx, rightIdx), appending nodes
// to `outNodes` and accumulating counters into `outStat` (kept separate from
// stats_ so parallel subtree builds do not race on shared statistics).
// Returns the index in `outNodes` of the node created for this range.
inline size_t BVHAccel::BuildTree(BVHBuildStatistics &outStat,
                                  std::vector<BVHNode> &outNodes,
                                  const float *vertices,
                                  const unsigned int *faces,
                                  unsigned int leftIdx, unsigned int rightIdx,
                                  int depth, float epsScale) {
  assert(leftIdx <= rightIdx);

  size_t offset = outNodes.size(); // index this node will occupy

  if (outStat.maxTreeDepth < depth) {
    outStat.maxTreeDepth = depth;
  }

  float3 bmin, bmax;
  // Use cached per-face boxes when available; otherwise recompute from the
  // raw vertex data.
  if (!bboxes_.empty()) {
    GetBoundingBox(bmin, bmax, bboxes_, &indices_.at(0), leftIdx, rightIdx,
                   epsScale);
  } else {
    ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
                       rightIdx, epsScale);
  }

  long long n = rightIdx - leftIdx;
  if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
    // Create leaf node.
    BVHNode leaf;
    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];
    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];
    assert(leftIdx < std::numeric_limits<unsigned int>::max());
    leaf.flag = 1;                        // leaf
    leaf.data[0] = (unsigned int)n;       // primitive count
    leaf.data[1] = (unsigned int)leftIdx; // first slot in indices_
    outNodes.push_back(leaf); // atomic update
    outStat.numLeafNodes++;
    return offset;
  }

  //
  // Create branch node.
  //
  //
  // Compute SAH and find best split axis and position.
  //
  int minCutAxis = 0;
  float cutPos[3] = {0.0, 0.0, 0.0};
  BinBuffer bins(options_.binSize);
  ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
                      leftIdx, rightIdx, epsScale);
  FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
                       options_.costTaabb, epsScale);

  // NOTE(review): this loop runs exactly one iteration, so only minCutAxis
  // is actually tried; a failed cut falls back to the object median below.
  unsigned int midIdx;
  int cutAxis = minCutAxis;
  for (int axisTry = 0; axisTry < 1; axisTry++) {
    unsigned int *begin = &indices_[leftIdx];
    unsigned int *end = &indices_[rightIdx - 1] + 1; // mimics end() iterator.
    unsigned int *mid = 0;
    // try minCutAxis first.
    cutAxis = (minCutAxis + axisTry) % 3;
    //
    // Split at (cutAxis, cutPos); indices_ is reordered in place.
    //
    mid = std::partition(begin, end,
                         SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));
    midIdx = leftIdx + (unsigned int)(mid - begin);
    if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
      // Can't split well: fall back to the object median (may give an
      // unoptimized but stable tree).
      midIdx = leftIdx + (unsigned int)(n >> 1);
      // Try another axis if there's axis to try.
    } else {
      // Found good cut. exit loop.
      break;
    }
  }

  BVHNode node;
  node.axis = cutAxis;
  node.flag = 0; // 0 = branch
  outNodes.push_back(node); // atomic update

  unsigned int leftChildIndex = 0;
  unsigned int rightChildIndex = 0;
  leftChildIndex = (unsigned int)BuildTree(outStat, outNodes, vertices, faces,
                                           leftIdx, midIdx, depth + 1, epsScale);
  rightChildIndex = (unsigned int)BuildTree(outStat, outNodes, vertices, faces,
                                            midIdx, rightIdx, depth + 1, epsScale);

  // Patch the branch node now that both child offsets are known.
  {
    outNodes[offset].data[0] = leftChildIndex;
    outNodes[offset].data[1] = rightChildIndex;
    outNodes[offset].bmin[0] = bmin[0];
    outNodes[offset].bmin[1] = bmin[1];
    outNodes[offset].bmin[2] = bmin[2];
    outNodes[offset].bmax[0] = bmax[0];
    outNodes[offset].bmax[1] = bmax[1];
    outNodes[offset].bmax[2] = bmax[2];
  }

  outStat.numBranchNodes++;
  return offset;
}
// Build a BVH over `numFaces` triangles. Steps:
//   1. create the identity index permutation (reordered by the tree build),
//   2. compute the scene bounds (optionally caching per-face boxes) and
//      derive epsScale — the scene-scale factor applied to epsilon tests,
//   3. build the tree, in parallel (shallow top tree + per-subtree OpenMP
//      builds) when the primitive count is large enough.
// Always returns true.
inline bool BVHAccel::Build(const float *vertices, const unsigned int *faces,
                            unsigned int numFaces,
                            const BVHBuildOptions &options) {
  options_ = options;
  stats_ = BVHBuildStatistics();

  assert(options_.binSize > 1);

  size_t n = numFaces;

  //
  // 1. Create triangle indices (permuted in place by the tree build).
  //
  indices_.resize(n);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < (long long)n; i++) {
    indices_[i] = (unsigned int)i;
  }

  //
  // 2. Compute bounding box to find scene scale.
  //    (epsScale is still the default 1.0 during this pass; the real scale
  //    is derived from the resulting extent below.)
  //
  float epsScale = 1.0f;
  float3 bmin, bmax;
  if (options.cacheBBox) {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
    bboxes_.resize(n);
    for (size_t i = 0; i < n; i++) { // for each face
      size_t idx = indices_[i];
      BBox bbox;
      // NOTE(review): bbox.bmin/bmax are read before being written here —
      // this relies on BBox's default constructor seeding them with +/-max
      // sentinels; confirm against the BBox definition.
      for (int j = 0; j < 3; j++) { // for each face vertex
        size_t fid = faces[3 * idx + j];
        for (int k = 0; k < 3; k++) { // xyz
          float minval = vertices[3 * fid + k];
          float maxval = vertices[3 * fid + k];
          if (bbox.bmin[k] > minval) {
            bbox.bmin[k] = minval;
          }
          if (bbox.bmax[k] < maxval) {
            bbox.bmax[k] = maxval;
          }
        }
      }
      bboxes_[idx] = bbox;
      // Grow the scene bounds by this face's box.
      for (int k = 0; k < 3; k++) { // xyz
        if (bmin[k] > bbox.bmin[k]) {
          bmin[k] = bbox.bmin[k];
        }
        if (bmax[k] < bbox.bmax[k]) {
          bmax[k] = bbox.bmax[k];
        }
      }
    }
  } else {
#ifdef _OPENMP
    ComputeBoundingBoxOMP(bmin, bmax, vertices, faces, &indices_.at(0), 0, n,
                          epsScale);
#else
    ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), 0,
                       (unsigned int)n, epsScale);
#endif
  }

  // epsScale = the largest extent of the scene AABB.
  float3 bsize = bmax - bmin;
  epsScale = std::abs(bsize[0]);
  if (epsScale < std::abs(bsize[1])) {
    epsScale = std::abs(bsize[1]);
  }
  if (epsScale < std::abs(bsize[2])) {
    epsScale = std::abs(bsize[2]);
  }

  //
  // 3. Build tree
  //
#ifdef _OPENMP
#if NANORT_ENABLE_PARALLEL_BUILD
  // Do parallel build for sufficiently large datasets.
  if (n > options.minPrimitivesForParallelBuild) {
    BuildShallowTree(nodes_, vertices, faces, 0, n, /* root depth */ 0,
                     options.shallowDepth, epsScale); // [0, n)

    assert(shallowNodeInfos_.size() > 0);

    // Build the deferred subtrees in parallel, each into its own node list
    // and statistics block so no synchronization is needed.
    std::vector<std::vector<BVHNode> > local_nodes(shallowNodeInfos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallowNodeInfos_.size());

#pragma omp parallel for
    for (int i = 0; i < (int)shallowNodeInfos_.size(); i++) {
      unsigned int leftIdx = shallowNodeInfos_[i].leftIdx;
      unsigned int rightIdx = shallowNodeInfos_[i].rightIdx;
      BuildTree(local_stats[i], local_nodes[i], vertices, faces, leftIdx,
                rightIdx, options.shallowDepth, epsScale);
    }

    // Join local nodes.
    for (int i = 0; i < (int)local_nodes.size(); i++) {
      assert(!local_nodes[i].empty());
      size_t offset = nodes_.size();
      // Rebase child indices of branch nodes: the subtree's root replaces
      // the placeholder at shallowNodeInfos_[i].offset while the remaining
      // nodes are appended starting at `offset`, hence the -1.
      for (size_t j = 0; j < local_nodes[i].size(); j++) {
        if (local_nodes[i][j].flag == 0) { // branch
          local_nodes[i][j].data[0] += offset - 1;
          local_nodes[i][j].data[1] += offset - 1;
        }
      }
      // replace
      nodes_[shallowNodeInfos_[i].offset] = local_nodes[i][0];
      // Skip root element of the local node.
      nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
                    local_nodes[i].end());
    }

    // Join statistics.
    for (int i = 0; i < (int)local_nodes.size(); i++) {
      stats_.maxTreeDepth =
          std::max(stats_.maxTreeDepth, local_stats[i].maxTreeDepth);
      stats_.numLeafNodes += local_stats[i].numLeafNodes;
      stats_.numBranchNodes += local_stats[i].numBranchNodes;
    }
  } else {
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale); // [0, n)
  }
#else  // !NANORT_ENABLE_PARALLEL_BUILD
  {
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale); // [0, n)
  }
#endif
#else  // !_OPENMP
  {
    BuildTree(stats_, nodes_, vertices, faces, 0, (unsigned int)n,
              /* root depth */ 0, epsScale); // [0, n)
  }
#endif

  stats_.epsScale = epsScale;
  epsScale_ = epsScale;

  return true;
}
// Serialize the BVH to a binary file. Layout:
//   u64 numNodes, numNodes * BVHNode, u64 numIndices, numIndices * u32.
// Fix: the original checked fwrite() results only via assert(), which
// vanishes under NDEBUG and silently produces truncated dumps; every write
// (and the flushing fclose) is now checked and reported.
inline bool BVHAccel::Dump(const char *filename) {
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
    return false;
  }

  assert(nodes_.size() > 0);

  unsigned long long numNodes = nodes_.size();
  unsigned long long numIndices = indices_.size();

  bool ok = (fwrite(&numNodes, sizeof(unsigned long long), 1, fp) == 1);
  if (ok && numNodes > 0) {
    ok = (fwrite(&nodes_.at(0), sizeof(BVHNode), numNodes, fp) == numNodes);
  }
  ok = ok && (fwrite(&numIndices, sizeof(unsigned long long), 1, fp) == 1);
  if (ok && numIndices > 0) {
    ok = (fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp) ==
          numIndices);
  }

  // fclose flushes buffered data; a failure here also means an incomplete
  // dump on disk.
  if (fclose(fp) != 0) {
    ok = false;
  }
  if (!ok) {
    fprintf(stderr, "[BVHAccel] Failed to write BVH data: %s\n", filename);
  }
  return ok;
}
// Deserialize a BVH previously written by Dump().
// Fix: the original checked fread() results only via assert(), which
// vanishes under NDEBUG — a truncated or malformed file would silently
// yield a garbage tree. Every read is now checked, the file is closed on
// all paths, and an empty index array no longer touches indices_.at(0).
inline bool BVHAccel::Load(const char *filename) {
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    fprintf(stderr, "Cannot open file: %s\n", filename);
    return false;
  }

  unsigned long long numNodes = 0;
  unsigned long long numIndices = 0;

  if (fread(&numNodes, sizeof(unsigned long long), 1, fp) != 1 ||
      numNodes == 0) {
    fclose(fp);
    return false;
  }
  nodes_.resize(numNodes);
  if (fread(&nodes_.at(0), sizeof(BVHNode), numNodes, fp) != numNodes) {
    fclose(fp);
    return false;
  }

  if (fread(&numIndices, sizeof(unsigned long long), 1, fp) != 1) {
    fclose(fp);
    return false;
  }
  indices_.resize(numIndices);
  if (numIndices > 0 &&
      fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp) !=
          numIndices) {
    fclose(fp);
    return false;
  }

  fclose(fp);
  return true;
}
namespace {
const int kMaxStackDepth = 512; // traversal stack capacity (sanity-checked)

// Ray/AABB slab test. rayDirSign[k] pre-selects the near/far plane per axis
// so no branching on the direction is needed inside the math. A hit includes
// the tmin == tmax edge case (a flat, 2D box).
inline bool IntersectRayAABB(float &tminOut, // [out]
                             float &tmaxOut, // [out]
                             float maxT, float bmin[3], float bmax[3],
                             const float3 &rayOrg, const float3 &rayInvDir,
                             int rayDirSign[3]) {
  // Entry (near) and exit (far) plane coordinate per axis.
  const float near_x = rayDirSign[0] ? bmax[0] : bmin[0];
  const float near_y = rayDirSign[1] ? bmax[1] : bmin[1];
  const float near_z = rayDirSign[2] ? bmax[2] : bmin[2];
  const float far_x = rayDirSign[0] ? bmin[0] : bmax[0];
  const float far_y = rayDirSign[1] ? bmin[1] : bmax[1];
  const float far_z = rayDirSign[2] ? bmin[2] : bmax[2];

  // Parametric distance to each slab plane.
  const float t_near_x = (near_x - rayOrg[0]) * rayInvDir[0];
  const float t_far_x = (far_x - rayOrg[0]) * rayInvDir[0];
  const float t_near_y = (near_y - rayOrg[1]) * rayInvDir[1];
  const float t_far_y = (far_y - rayOrg[1]) * rayInvDir[1];
  const float t_near_z = (near_z - rayOrg[2]) * rayInvDir[2];
  const float t_far_z = (far_z - rayOrg[2]) * rayInvDir[2];

  // Intersect the three slab intervals.
  float tmin = (t_near_x > t_near_y) ? t_near_x : t_near_y;
  tmin = (tmin > t_near_z) ? tmin : t_near_z;
  float tmax = (t_far_x < t_far_y) ? t_far_x : t_far_y;
  tmax = (tmax < t_far_z) ? tmax : t_far_z;

  // Hit if the interval is non-empty, in front of the ray, and not beyond
  // the current cull distance.
  if ((tmax > 0.0) && (tmin <= tmax) && (tmin <= maxT)) {
    tminOut = tmin;
    tmaxOut = tmax;
    return true;
  }

  return false; // no hit
}
// Möller–Trumbore style ray/triangle intersection without backface culling.
// On a hit nearer than the incoming tInOut, updates tInOut and barycentric
// (uOut, vOut) and returns true; otherwise leaves all outputs untouched.
inline bool TriangleIsect(float &tInOut, float &uOut, float &vOut,
                          const float3 &v0, const float3 &v1, const float3 &v2,
                          const float3 &rayOrg, const float3 &rayDir,
                          float epsScale) {
  // Scene-scale-aware epsilon for the near-parallel determinant test.
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
  const float3 &p0 = v0; // (v0[0], v0[1], v0[2])
  const float3 &p1 = v1; // (v1[0], v1[1], v1[2])
  const float3 &p2 = v2; // (v2[0], v2[1], v2[2])
  float3 e1, e2;
  float3 p, s, q;
  e1 = p1 - p0;
  e2 = p2 - p0;
  p = vcross(rayDir, e2);
  float invDet;
  float det = vdot(e1, p);
  if (std::abs(det) < kEPS) { // no-cull: reject only near-parallel rays
    return false;
  }
  invDet = 1.0f / det;
  s = rayOrg - p0;
  q = vcross(s, e1);
  // Barycentric coordinates and hit distance.
  float u = vdot(s, p) * invDet;
  float v = vdot(q, rayDir) * invDet;
  float t = vdot(e2, q) * invDet;
  if (u < 0.0f || u > 1.0f)
    return false;
  // NOTE(review): `v <= 0.0f` rejects hits exactly on the v == 0 edge while
  // the u test above is inclusive — confirm this asymmetry is intentional.
  if (v <= 0.0f || u + v > 1.0f)
    return false;
  // Reject hits behind the origin or farther than the current nearest hit.
  if (t < 0.0f || t > tInOut)
    return false;
  tInOut = t;
  uOut = u;
  vOut = v;
  return true;
}
// Intersect the ray with every triangle referenced by this leaf, keeping
// the nearest hit in `isect`. Faces outside traceOptions.faceIdsRange are
// skipped. Returns true if any triangle improved the current hit distance.
inline bool TestLeafNode(Intersection &isect, // [inout]
                         const BVHNode &node,
                         const std::vector<unsigned int> &indices,
                         const float *vertices, const unsigned int *faces,
                         const Ray &ray, float epsScale,
                         const BVHTraceOptions &traceOptions) {
  unsigned int numTriangles = node.data[0];
  unsigned int offset = node.data[1];

  float t = isect.t; // current nearest hit distance

  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];
  float3 rayDir;
  rayDir[0] = ray.dir[0];
  rayDir[1] = ray.dir[1];
  rayDir[2] = ray.dir[2];

  bool hit = false;
  for (unsigned int i = 0; i < numTriangles; i++) {
    unsigned int faceIdx = indices[offset + i];

    // Skip faces outside the user-supplied id range.
    if ((faceIdx < traceOptions.faceIdsRange[0]) ||
        (faceIdx >= traceOptions.faceIdsRange[1])) {
      continue;
    }

    const unsigned int *f = &faces[3 * faceIdx];
    float3 v0(&vertices[3 * f[0]]);
    float3 v1(&vertices[3 * f[1]]);
    float3 v2(&vertices[3 * f[2]]);

    float u, v;
    if (TriangleIsect(t, u, v, v0, v1, v2, rayOrg, rayDir, epsScale)) {
      // Nearer hit found; record it.
      isect.t = t;
      isect.u = u;
      isect.v = v;
      isect.faceID = faceIdx;
      hit = true;
    }
  }

  return hit;
}
// Intersect the ray against every triangle in this leaf, maintaining in
// `isects` a priority queue (furthest hit on top) of at most
// `maxIntersections` nearest hits. Returns true if the queue was modified.
inline bool MultiHitTestLeafNode(IsectVector &isects, // [inout]
                                 int maxIntersections, const BVHNode &node,
                                 const std::vector<unsigned int> &indices,
                                 const float *vertices,
                                 const unsigned int *faces, const Ray &ray,
                                 float epsScale) {
  bool hit = false;
  unsigned int numTriangles = node.data[0];
  unsigned int offset = node.data[1];
  // Cull distance: once the queue is full, only hits nearer than the
  // current furthest stored hit are interesting.
  float t = std::numeric_limits<float>::max();
  if (isects.size() >= (size_t)maxIntersections) {
    t = isects.top().t; // current furthest hit distance
  }
  const float3 &rayOrg = ray.org;
  const float3 &rayDir = ray.dir;
  for (unsigned int i = 0; i < numTriangles; i++) {
    int faceIdx = indices[i + offset];
    const unsigned int *ff = &faces[3 * faceIdx];
    // NOTE(review): reinterpreting the vertex array as float3* assumes
    // float3 is layout-compatible with three packed floats — confirm
    // against the float3 definition (strict-aliasing concern).
    float3 *v0, *v1, *v2;
    v0 = (float3 *)(vertices + 3 * (*(ff + 0)));
    v1 = (float3 *)(vertices + 3 * (*(ff + 1)));
    v2 = (float3 *)(vertices + 3 * (*(ff + 2)));
    float u, v;
    if (TriangleIsect(t, u, v, *v0, *v1, *v2, rayOrg, rayDir, epsScale)) {
      // TriangleIsect wrote the new hit distance into `t`.
      if (isects.size() < (size_t)maxIntersections) {
        isects.emplace(t, u, v, faceIdx);
        // Queue not yet full: keep accepting any positive-distance hit.
        t = std::numeric_limits<float>::max();
        hit = true;
      } else {
        if (t < isects.top().t) {
          // Replace the furthest stored intersection with this nearer one.
          isects.pop();
          isects.emplace(t, u, v, faceIdx);
          // Update furthest hit distance.
          t = isects.top().t;
          hit = true;
        }
      }
    }
  }
  return hit;
}
} // namespace
// Find the nearest intersection of `ray` with the mesh. Returns true and
// fills `isect` on a hit; on a miss `isect.t` stays at FLT_MAX and
// `isect.faceID` at -1.
inline bool BVHAccel::Traverse(Intersection &isect, const float *vertices,
                               const unsigned int *faces, const Ray &ray,
                               const BVHTraceOptions &options) {
  float hitT = std::numeric_limits<float>::max(); // far = no hit.

  int nodeStackIndex = 0;
  // NOTE(review): fixed 512-entry stack; the pushes below are not
  // bounds-checked (the kMaxStackDepth assert runs only after traversal) —
  // confirm tree depth cannot exceed this.
  int nodeStack[512];
  nodeStack[0] = 0;

  // Init isect info as no hit.
  isect.t = hitT;
  isect.u = 0.0;
  isect.v = 0.0;
  isect.faceID = -1;

  // Per-axis direction sign; selects near/far child visit order.
  int dirSign[3];
  dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0;
  dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0;
  dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0;

  // @fixme { Check edge case; i.e., 1/0 }
  float3 rayInvDir;
  rayInvDir[0] = 1.0f / ray.dir[0];
  rayInvDir[1] = 1.0f / ray.dir[1];
  rayInvDir[2] = 1.0f / ray.dir[2];

  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];

  float minT, maxT;
  // Depth-first traversal with an explicit stack.
  while (nodeStackIndex >= 0) {
    int index = nodeStack[nodeStackIndex];
    BVHNode &node = nodes_[index];
    nodeStackIndex--;
    bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg,
                                rayInvDir, dirSign);
    if (node.flag == 0) { // branch node
      if (hit) {
        int orderNear = dirSign[node.axis];
        int orderFar = 1 - orderNear;
        // Traverse near child first (pushed last, popped first).
        nodeStack[++nodeStackIndex] = node.data[orderFar];
        nodeStack[++nodeStackIndex] = node.data[orderNear];
      }
    } else { // leaf node
      if (hit) {
        if (TestLeafNode(isect, node, indices_, vertices, faces, ray,
                         epsScale_, options)) {
          // Shrink the search interval so farther nodes get culled.
          hitT = isect.t;
        }
      }
    }
  }

  assert(nodeStackIndex < kMaxStackDepth);

  if (isect.t < std::numeric_limits<float>::max()) {
    return true;
  }

  return false;
}
// Collect up to `maxIntersections` nearest hits along the ray, returned in
// `isects` sorted front-to-back. Returns true when at least one hit exists.
inline bool BVHAccel::MultiHitTraverse(StackVector<Intersection, 128> &isects,
                                       int maxIntersections,
                                       const float *vertices,
                                       const unsigned int *faces, Ray &ray) {
  float hitT = std::numeric_limits<float>::max(); // far = no hit.

  int nodeStackIndex = 0;
  int nodeStack[512];
  nodeStack[0] = 0;

  // Max-first priority queue of candidate hits (furthest on top).
  IsectVector isectPQ;
  isects->clear();

  // Per-axis direction sign; selects near/far child visit order.
  int dirSign[3];
  dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0;
  dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0;
  dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0;

  // @fixme { Check edge case; i.e., 1/0 }
  float3 rayInvDir;
  rayInvDir[0] = 1.0f / ray.dir[0];
  rayInvDir[1] = 1.0f / ray.dir[1];
  rayInvDir[2] = 1.0f / ray.dir[2];

  const float3 &rayOrg = *as_float3(ray.org);

  float minT, maxT;
  while (nodeStackIndex >= 0) {
    int index = nodeStack[nodeStackIndex];
    BVHNode &node = nodes_[index];
    nodeStackIndex--;
    bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg,
                                rayInvDir, dirSign);
    if (node.flag == 0) { // branch node
      if (hit) {
        int orderNear = dirSign[node.axis];
        int orderFar = 1 - orderNear;
        // Traverse near first.
        nodeStack[++nodeStackIndex] = node.data[orderFar];
        nodeStack[++nodeStackIndex] = node.data[orderNear];
      }
    } else { // leaf node
      if (hit) {
        if (MultiHitTestLeafNode(isectPQ, maxIntersections, node, indices_,
                                 vertices, faces, ray, epsScale_)) {
          // Only tighten `hitT` once the queue is full: until then every
          // positive-distance hit must still be collected.
          if (isectPQ.size() >= (size_t)maxIntersections) {
            hitT = isectPQ.top().t;
          }
        }
      }
    }
  }

  assert(nodeStackIndex < kMaxStackDepth);

  if (!isectPQ.empty()) {
    // Drain the max-heap back-to-front so `isects` ends up ordered with the
    // frontmost intersection first.
    size_t n = isectPQ.size();
    isects->resize(n);
    for (size_t i = 0; i < n; i++) {
      const Intersection &isect = isectPQ.top();
      isects[n - i - 1] = isect;
      isectPQ.pop();
    }
    return true;
  }

  return false;
}
} // namespace
#endif
#endif // __NANORT_H__
|
GB_unaryop__abs_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_uint16
// op(A') function: GB_tran__abs_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IABS ((int64_t) Ax [p]) for all p, in parallel.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__abs_int64_uint16
(
    int64_t *restrict Cx,        // output array, length anz
    const uint16_t *restrict Ax, // input array, length anz
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint16_t -> int64_t, and apply
// GB_IABS. The loop body comes from the shared template
// GB_unaryop_transpose.c (numeric phase 2).
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_tran__abs_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,    // Rowcounts [naslice]
    GBI_single_iterator Iter,        // iterator over slices of A
    const int64_t *restrict A_slice, // slice boundaries
    int naslice                      // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__isle_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isle_uint64
// A.*B function (eWiseMult): GB_AemultB__isle_uint64
// A*D function (colscale): GB_AxD__isle_uint64
// D*A function (rowscale): GB_DxB__isle_uint64
// C+=B function (dense accum): GB_Cdense_accumB__isle_uint64
// C+=b function (dense accum): GB_Cdense_accumb__isle_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_uint64
// C=scalar+B GB_bind1st__isle_uint64
// C=scalar+B' GB_bind1st_tran__isle_uint64
// C=A+scalar GB_bind2nd__isle_uint64
// C=A'+scalar GB_bind2nd_tran__isle_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT64 || GxB_NO_ISLE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = (aij <= bij).
// The actual loop lives in the included template, specialized by the
// GB_* macros defined at the top of this (auto-generated) file.
GrB_Info GB_Cdense_ewise3_noaccum__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C,
// applying cij = (cij <= bij) for each entry of B.  The slice arrays
// partition B's entries across ntasks tasks for the template loop.
GrB_Info GB_Cdense_accumB__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense
// matrix C, cij = (cij <= b).
GrB_Info GB_Cdense_accumb__isle_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block above always returns.
// Harmless artifact of the code generator; left as generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// cij = (aij <= djj).  The *_is_pattern flags tell the template when a
// matrix's values are unused (pattern-only).
GrB_Info GB_AxD__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its values are written here
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i),
// cij = (dii <= bij).
GrB_Info GB_DxB__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; only its values are written here
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A
// and B, with cij = (aij <= bij) where both are present.  The C_to_*
// maps and TaskList come from the symbolic (phase-1) analysis.
GrB_Info GB_AaddB__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the
// patterns of A and B, with cij = (aij <= bij).
GrB_Info GB_AemultB__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = op (x, Bx [k]) for k = 0..anz-1, with the scalar x bound as
// the first operand of the ISLE operator: Cx [k] = (x <= Bx [k]).
// Cx and Bx may alias each other (each element is read before written).
GrB_Info GB_bind1st__isle_uint64
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Bx = (uint64_t *) Bx_input ;
const uint64_t x = (*((uint64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = (x <= Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = op (Ax [k], y) for k = 0..anz-1, with the scalar y bound as
// the second operand of the ISLE operator: Cx [k] = (Ax [k] <= y).
// Cx and Ax may alias each other (each element is read before written).
GrB_Info GB_bind2nd__isle_uint64
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
const uint64_t y = (*((uint64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = (Ax [k] <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x <= aji) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__isle_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aji <= y) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__isle_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zlag2c.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions mixed zc -> ds
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lag2
*
* Converts m-by-n matrix A from complex double to complex single precision.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] pA
* The lda-by-n matrix A in double complex precision.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] pAs
* On exit, the ldas-by-n matrix As in single complex precision.
*
* @param[in] ldas
* The leading dimension of the array As. ldas >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_zlag2c
* @sa plasma_clag2z
* @sa plasma_dlag2s
* @sa plasma_slag2d
*
******************************************************************************/
// Converts the m-by-n matrix A from complex double (pA, leading dim lda)
// to complex single precision (pAs, leading dim ldas).  Validates the
// arguments, tiles both matrices, runs the conversion asynchronously, and
// translates the results back to LAPACK layout.
// Returns PlasmaSuccess or a Plasma error code.
int plasma_zlag2c(int m, int n,
                  plasma_complex64_t *pA, int lda,
                  plasma_complex32_t *pAs, int ldas)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments (negative return value = index of bad argument).
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldas < imax(1, m)) {
        plasma_error("illegal value of ldas");
        return -6;
    }
    // quick return
    if (imin(n, m) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lag2c(plasma, PlasmaComplexDouble, m, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t As;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &As);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence.
    // Fix: the return value was previously assigned but never checked.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&As);
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize request.
    // Fix: the return value was previously assigned but never checked.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&As);
        plasma_desc_destroy(&A);
        return retval;
    }
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pAs, ldas, As, &sequence, &request);
        // Call tile async function.
        plasma_omp_zlag2c(A, As, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_cdesc2ge(As, pAs, ldas, &sequence, &request);
    }
    // implicit synchronization at the end of the parallel region
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&As);
    // Return status accumulated by the async calls.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_lag2
*
* Converts m-by-n matrix A from double complex to single complex precision.
* Non-blocking tile version of plasma_zlag2c(). May return before the
* computation is finished. Operates on matrices stored by tiles. All matrices
* are passed through descriptors. All dimensions are taken from the
* descriptors. Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[out] As
* Descriptor of matrix As.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zlag2c
* @sa plasma_omp_clag2z
* @sa plasma_omp_dlag2s
* @sa plasma_omp_slag2d
*
******************************************************************************/
// Non-blocking tile version of plasma_zlag2c(): converts tiled matrix A
// (complex double) to As (complex single).  Errors are reported through
// sequence->status / request->status via plasma_request_fail().
void plasma_omp_zlag2c(plasma_desc_t A, plasma_desc_t As,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request first: plasma_request_fail() dereferences
    // both, so these NULL checks must precede every other error path
    // (fix: the original reported earlier failures through possibly-NULL
    // sequence/request pointers).
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(As) != PlasmaSuccess) {
        plasma_error("invalid As");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (imin(A.m, A.n) == 0)
        return;
    // Call the parallel function.
    plasma_pzlag2c(A, As, sequence, request);
}
|
ten_tusscher_3_two_region.c | #include "model_common.h"
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_3_two_region.h"
#define ENDO
// Reports the model's resting potential and ODE system size to the caller.
// Signature comes from the GET_CELL_MODEL_DATA macro; presumably it provides
// cell_model, get_initial_v and get_neq — see model_common.h.
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Initializes the 12-entry state vector sv for one cell.
// Without extra_data, the ten Tusscher resting-state defaults are used.
// With extra_data, the per-region initial conditions are read from the
// packed real array; from the offsets used here and in
// solve_model_odes_cpu the layout appears to be
//   [0..6] region-1 params, [7..18] region-1 ICs,
//   [19..25] region-2 params, [26..37] region-2 ICs, [38..] fibrosis
// — TODO confirm against the code that builds extra_data.
// sv_id is presumably supplied by the SET_ODE_INITIAL_CONDITIONS_CPU macro.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
if (extra_data == NULL)
{
sv[0] = -86.2f; // V; millivolt
sv[1] = 0.0f; //M
sv[2] = 0.75; //H
sv[3] = 0.75; //J
sv[4] = 0.0f; //Xr1
sv[5] = 0.0f; //Xs
sv[6] = 1.0f; //S
sv[7] = 1.0f; //F
sv[8] = 1.0f; //F2
sv[9] = 0.0; //D_INF
sv[10] = 0.0; //R_INF
sv[11] = 0.0; //Xr2_INF}
}
else
{
//region 1 = Healthy // region 2 = fibro or border
real *initial_conditions_1 = ((real*)extra_data) + 7; //pointer
real *initial_conditions_2 = ((real*)extra_data) + 26;
real *fibrosis = ((real*)extra_data) + 7 + 31; //pointer
// fibrosis == 1.0 marks a healthy (region-1) cell
if (fibrosis[sv_id] == 1.0)
{
sv[0] = initial_conditions_1[0]; // V; millivolt
sv[1] = initial_conditions_1[1]; //M
sv[2] = initial_conditions_1[2]; //H
sv[3] = initial_conditions_1[3]; //J
sv[4] = initial_conditions_1[4]; //Xr1
sv[5] = initial_conditions_1[5]; //Xs
sv[6] = initial_conditions_1[6]; //S
sv[7] = initial_conditions_1[7]; //F
sv[8] = initial_conditions_1[8]; //F2
sv[9] = initial_conditions_1[9]; //D_INF
sv[10] = initial_conditions_1[10]; //R_INF
sv[11] = initial_conditions_1[11]; //Xr2_INF}
}
else
{
sv[0] = initial_conditions_2[0]; // V; millivolt
sv[1] = initial_conditions_2[1]; //M
sv[2] = initial_conditions_2[2]; //H
sv[3] = initial_conditions_2[3]; //J
sv[4] = initial_conditions_2[4]; //Xr1
sv[5] = initial_conditions_2[5]; //Xs
sv[6] = initial_conditions_2[6]; //S
sv[7] = initial_conditions_2[7]; //F
sv[8] = initial_conditions_2[8]; //F2
sv[9] = initial_conditions_2[9]; //D_INF
sv[10] = initial_conditions_2[10]; //R_INF
sv[11] = initial_conditions_2[11]; //Xr2_INF}
}
}
}
// Advances every requested cell by num_steps ODE time steps of size dt.
//
// extra_data layout (packed reals), when supplied by the caller:
//   [0..6]    region-1 (healthy) parameters
//   [7..18]   region-1 initial conditions (12 state variables)
//   [19..25]  region-2 (fibrotic/border) parameters
//   [26..37]  region-2 initial conditions
//   [38..]    per-cell fibrosis value (1.0 = healthy)
// When extra_data is NULL, a default healthy-cell parameter block is built
// locally and every cell is marked healthy.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    real *fibrosis;
    // Default values for a healthy cell ///////////
    real atpi = 6.8f;
    real Ko = 5.4f;
    real Ki = 138.3f;
    real Vm_change = 0.0;
    real GNa_multiplicator = 1.0f;
    real GCa_multiplicator = 1.0f;
    real INaCa_multiplicator = 1.0f;
    real V_0 = -86.2f; // V; millivolt
    real M_0 = 0.0f; //M
    real H_0 = 0.75; //H
    real J_0 = 0.75; //J
    real Xr1_0 = 0.0f; //Xr1
    real Xs_0 = 0.0f; //Xs
    real S_0 = 1.0f; //S
    real F_0 = 1.0f; //F
    real F2_0 = 1.0f; //F2
    real D_inf_0 = 0.0; //D_INF
    real R_inf_0 = 0.0; //R_INF
    real Xr2_inf_0 = 0.0; //Xr2_INF}
    // 7 parameters + 12 initial conditions per region (region 2 adds another
    // 7 + 12 in caller-supplied extra_data).
    int num_extra_parameters = 7;
    int num_initial_conditions = 12;
    size_t extra_parameters_size = (num_extra_parameters+num_initial_conditions)*sizeof(real);
    // Set when the default buffers are allocated here, so they can be freed.
    int defaults_allocated = 0;
    if(extra_data)
    {
        // fibrosis values follow both regions' parameters and ICs
        fibrosis = ((real*)extra_data) + 2*num_extra_parameters + 2*num_initial_conditions;
    }
    else
    {
        defaults_allocated = 1;
        extra_data = malloc(extra_parameters_size);
        ((real*)extra_data)[0] = atpi;
        ((real*)extra_data)[1] = Ko;
        ((real*)extra_data)[2] = Ki;
        ((real*)extra_data)[3] = Vm_change;
        ((real*)extra_data)[4] = GNa_multiplicator;
        ((real*)extra_data)[5] = GCa_multiplicator;
        ((real*)extra_data)[6] = INaCa_multiplicator;
        ((real*)extra_data)[7] = V_0;
        ((real*)extra_data)[8] = M_0;
        ((real*)extra_data)[9] = H_0;
        ((real*)extra_data)[10] = J_0;
        ((real*)extra_data)[11] = Xr1_0;
        ((real*)extra_data)[12] = Xs_0;
        ((real*)extra_data)[13] = S_0;
        ((real*)extra_data)[14] = F_0;
        ((real*)extra_data)[15] = F2_0;
        ((real*)extra_data)[16] = D_inf_0;
        ((real*)extra_data)[17] = R_inf_0;
        ((real*)extra_data)[18] = Xr2_inf_0;
        // Fix: mark every cell healthy (fibrosis == 1.0).  The original
        // calloc'd zeros, which made RHS_cpu take the fibrotic branch and
        // read region-2 parameters (indices 19..25) past the end of the
        // default buffer allocated above.
        fibrosis = malloc(num_cells_to_solve * sizeof(real));
        int c;
        for (c = 0; c < num_cells_to_solve; c++)
            fibrosis[c] = 1.0;
    }
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        for (int j = 0; j < num_steps; ++j)
        {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i], fibrosis[i], extra_data);
        }
    }
    // Fix: the original tested `extra_data == NULL` after reassigning
    // extra_data above, so neither local allocation was ever freed (leak).
    if (defaults_allocated)
    {
        free(extra_data);
        free(fibrosis);
    }
}
// Advances one cell's 12-entry state vector sv by a single step of size dt.
// The membrane potential (sv[0]) is integrated with explicit Euler; for the
// remaining variables RHS_cpu returns the already-advanced values (it applies
// the Rush-Larsen scheme internally), so they are copied through unchanged.
void solve_model_ode_cpu(real dt, real *sv, real stim_current, real fibrosis, real *extra_parameters) {
    assert(sv);
    real state[NEQ];
    real update[NEQ];
    for (int k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }
    RHS_cpu(state, update, stim_current, dt, fibrosis, extra_parameters);
    // Explicit Euler step for the membrane potential only.
    sv[0] = state[0] + dt * update[0];
    // Gate variables: RHS_cpu already advanced them one step.
    for (int k = 1; k < NEQ; k++) {
        sv[k] = update[k];
    }
}
// Right-hand side of the reduced ten Tusscher (3-variable-current) model with
// two tissue regions and ischemia (ATP-sensitive K+ current) modifications.
// Inputs: sv = 12-entry state vector, stim_current = applied stimulus,
// dt = time step (used by the Rush-Larsen gate updates below),
// fibrosis in [0,1] selects/blends region parameters,
// extra_parameters = packed region parameters (see solve_model_odes_cpu).
// Outputs: rDY_[0] = dV/dt; rDY_[1..11] = the gate values already advanced
// one step by the Rush-Larsen scheme (note the exp(-dt/TAU) forms below) —
// the caller copies them into sv directly.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt, real fibrosis, real *extra_parameters) {
//fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic. Anything between 0 and 1 means border zone
//THIS IS THE STATE VECTOR THAT WE NEED TO SAVE IN THE STEADY STATE
const real svolt = sv[0];
const real sm = sv[1];
const real sh = sv[2];
const real sj = sv[3];
const real sxr1 = sv[4];
const real sxs = sv[5];
const real ss = sv[6];
const real sf = sv[7];
const real sf2 = sv[8];
const real D_INF = sv[9];
const real R_INF = sv[10];
const real Xr2_INF = sv[11];
const real natp = 0.24; // K dependence of ATP-sensitive K current
const real nicholsarea = 0.00005; // Nichol's areas (cm^2)
const real hatp = 2; // Hill coefficient
// Here we set the parameters based on the type of cell in region 1 or region 2...
real atpi;
real Ko;
real Ki;
real Vm_modifier;
real GNa_multplicator;
real GCaL_multplicator;
real INaCa_multplicator;
//Healthy cell
if(fibrosis==1)
{
atpi = extra_parameters[0];
Ko = extra_parameters[1];
Ki = extra_parameters[2];
Vm_modifier = extra_parameters[3];
GNa_multplicator = extra_parameters[4];
GCaL_multplicator = extra_parameters[5];
INaCa_multplicator = extra_parameters[6];
}
//Fibrotic or BorderZone cell
else
{
// NOTE(review): indices 19..25 exist only when the caller supplied the
// full two-region extra_data buffer — confirm against the caller.
atpi = extra_parameters[19];
Ko = extra_parameters[20];
Ki = extra_parameters[21];
Vm_modifier = extra_parameters[22];
GNa_multplicator = extra_parameters[23];
GCaL_multplicator = extra_parameters[24];
INaCa_multplicator = extra_parameters[25];
}
// Blend each parameter linearly between the region value (fibrosis=0) and
// the healthy default (fibrosis=1), so border-zone cells interpolate.
//Linear changing of atpi depending on the fibrosis and distance from the center of the scar (only for border zone cells)
//~ real atpi = extra_parameters[0];
real atpi_change = 6.8f - atpi;
atpi = atpi + atpi_change*fibrosis;
//Extracellular potassium concentration was elevated
//from its default value of 5.4 mM to values between 6.0 and 8.0 mM
//Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry
//~ real Ko = extra_parameters[1];
real Ko_change = 5.4f - Ko;
Ko = Ko + Ko_change*fibrosis;
//~ real Ki = extra_parameters[2];
real Ki_change = 138.3 - Ki;
Ki = Ki + Ki_change*fibrosis;
//~ real Vm_modifier = extra_parameters[3];
Vm_modifier = Vm_modifier - Vm_modifier*fibrosis;
//~ real GNa_multplicator = extra_parameters[4];
real GNa_multplicator_change = 1.0f - GNa_multplicator;
GNa_multplicator = GNa_multplicator + GNa_multplicator_change*fibrosis;
//~ real GCaL_multplicator = extra_parameters[5];
real GCaL_multplicator_change = 1.0f - GCaL_multplicator;
GCaL_multplicator = GCaL_multplicator + GCaL_multplicator_change*fibrosis;
//~ real INaCa_multplicator = extra_parameters[6];
real INaCa_multplicator_change = 1.0f - INaCa_multplicator;
INaCa_multplicator = INaCa_multplicator + INaCa_multplicator_change*fibrosis;
//real katp = 0.306;
//Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry
//real katp = 0.306;
const real katp = -0.0942857142857*atpi + 0.683142857143; //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry
const real patp = 1/(1 + pow((atpi/katp),hatp));
const real gkatp = 0.000195/nicholsarea;
const real gkbaratp = gkatp*patp*pow((Ko/5.4),natp);
const real katp2= 1.4;
const real hatp2 = 2.6;
const real pcal = 1.0/(1.0 + pow((katp2/atpi),hatp2));
// Fixed intracellular/extracellular ion concentrations (mM)
const real Cao=2.0;
const real Nao=140.0;
const real Cai=0.00007;
const real Nai=7.67;
//Constants
const real R=8314.472;
const real F=96485.3415;
const real T=310.0;
const real RTONF=(R*T)/F;
//Parameters for currents
//Parameters for IKr
const real Gkr=0.101;
//Parameters for Iks
const real pKNa=0.03;
#ifdef EPI
const real Gks=0.257;
#endif
#ifdef ENDO
const real Gks=0.392;
#endif
#ifdef MCELL
const real Gks=0.098;
#endif
//Parameters for Ik1
const real GK1=5.405;
//Parameters for Ito
#ifdef EPI
const real Gto=0.294;
#endif
#ifdef ENDO
const real Gto=0.073;
#endif
#ifdef MCELL
const real Gto=0.294;
#endif
//Parameters for INa
const real GNa=14.838*GNa_multplicator; //ACIDOSIS
//Parameters for IbNa
const real GbNa=0.00029;
//Parameters for INaK
const real KmK=1.0;
const real KmNa=40.0;
const real knak=2.724;
//Parameters for ICaL
const real GCaL=0.2786*pcal*GCaL_multplicator; //ACIDOSIS
//Parameters for IbCa
const real GbCa=0.000592;
//Parameters for INaCa
const real knaca=1000;
const real KmNai=87.5;
const real KmCa=1.38;
const real ksat=0.1;
const real n=0.35;
//Parameters for IpCa
const real GpCa=0.1238;
const real KpCa=0.0005;
//Parameters for IpK;
const real GpK=0.0293;
// Nernst/reversal potentials from the fixed concentrations above
const real Ek=RTONF*(log((Ko/Ki)));
const real Ena=RTONF*(log((Nao/Nai)));
const real Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
const real Eca=0.5*RTONF*(log((Cao/Cai)));
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real IKatp;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real Xr1_INF;
real Xr2_INF_new;
real TAU_Xr1;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF_new;
real S_INF;
real TAU_S;
real Af;
real Bf;
real Cf;
real Af2;
real Bf2;
real Cf2;
real D_INF_new;
real TAU_F;
real F_INF;
real TAU_F2;
real F2_INF;
real sItot;
//Needed to compute currents
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*((svolt-Vm_modifier)-Ena); //ACIDOSIS
ICaL=GCaL*D_INF*sf*sf2*((svolt-Vm_modifier)-60); //ACIDOSIS
Ito=Gto*R_INF*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*Xr2_INF*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaCa = INaCa*INaCa_multplicator; //ACIDOSIS
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
IKatp = gkbaratp*(svolt-Ek);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
IKatp +
stim_current;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF_new=1./(1.+exp((svolt-(-88.))/24.));
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=(1400./(sqrt(1.+exp((5.-svolt)/6))));
Bxs=(1./(1.+exp((svolt-35.)/15.)));
TAU_Xs=Axs*Bxs+80;
// Epicardial / endocardial / mid-myocardial variants of Ito gating
// (this translation unit defines ENDO at the top of the file)
#ifdef EPI
R_INF_new=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF_new=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF_new=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF_new=1./(1.+exp((-8-svolt)/7.5));
F_INF=1./(1.+exp((svolt+20)/7));
Af=1102.5*exp(-(svolt+27)*(svolt+27)/225);
Bf=200./(1+exp((13-svolt)/10.));
Cf=(180./(1+exp((svolt+30)/10)))+20;
TAU_F=Af+Bf+Cf;
F2_INF=0.67/(1.+exp((svolt+35)/7))+0.33;
Af2=600*exp(-(svolt+27)*(svolt+27)/170);
Bf2=7.75/(1.+exp((25-svolt)/10));
Cf2=16/(1.+exp((svolt+30)/10));
TAU_F2=Af2+Bf2+Cf2;
//update voltage
rDY_[0] = -sItot;
//Update gates
// Rush-Larsen: each gate relaxes toward its steady state with its own tau,
// so rDY_[1..11] are the new gate values, not time derivatives.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[6]= S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[7] =F_INF-(F_INF-sf)*exp(-dt/TAU_F);
rDY_[8] =F2_INF-(F2_INF-sf2)*exp(-dt/TAU_F2);
rDY_[9] = D_INF_new;
rDY_[10] = R_INF_new;
rDY_[11] = Xr2_INF_new;
}
|
GB_binop__bclr_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint16)
// C=scalar+B GB (_bind1st__bclr_uint16)
// C=scalar+B' GB (_bind1st_tran__bclr_uint16)
// C=A+scalar GB (_bind2nd__bclr_uint16)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITCLR (x, y, uint16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense;
// cij = GB_BITCLR (aij, bij, uint16_t, 16).
void GB (_Cdense_ewise3_noaccum__bclr_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B_ek_slicing/B_ntasks/B_nthreads describe the parallel slicing of B's
// entries, consumed by the template.  Returns GrB_NO_VALUE when this
// operator/type pair is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_accumB__bclr_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points at a uint16_t scalar b; the template applies C(i,j) += b.
GrB_Info GB (_Cdense_accumb__bclr_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns (generated code).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no colscale kernel was generated for BCLR.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no rowscale kernel was generated for BCLR.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// alpha/beta scalars are only read when is_eWiseUnion is true; presumably the
// fill values for entries missing in A/B -- see GB_add_template.c for use.
GrB_Info GB (_AaddB__bclr_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces declared here are freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// The included meta file dispatches on ewise_method and the mask settings.
GrB_Info GB (_AemultB_08__bclr_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// BCLR is non-commutative, so GB_BINOP_FLIP is 1 and the flipxy flag selects
// between fmult(x,y) and fmult(y,x) via the GB_FLIPPED template switch.
GrB_Info GB (_AemultB_02__bclr_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Parallelism is sliced over the mask M (M_ek_slicing), not over A or B.
GrB_Info GB (_AemultB_04__bclr_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitclr (x, Bx [p]) for each entry p present per Bb (GBB test).
GrB_Info GB (_bind1st__bclr_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cz = (uint16_t *) Cx_output ;
const uint16_t xscalar = (*((uint16_t *) x_input)) ;
uint16_t *Bvals = (uint16_t *) Bx_input ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < bnz ; p++)
{
if (GBB (Bb, p))
{
uint16_t bval = GBX (Bvals, p, false) ;
Cz [p] = GB_BITCLR (xscalar, bval, uint16_t, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitclr (Ax [p], y) for each entry p present per Ab (GBB test).
GrB_Info GB (_bind2nd__bclr_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cz = (uint16_t *) Cx_output ;
uint16_t *Avals = (uint16_t *) Ax_input ;
const uint16_t yscalar = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
uint16_t aval = GBX (Avals, p, false) ;
Cz [p] = GB_BITCLR (aval, yscalar, uint16_t, 16) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ; \
}
GrB_Info GB (_bind1st_tran__bclr_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the redefinition below is not runtime code; it restores
// GB_ATYPE at the preprocessor level for any code that follows.
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ; \
}
GrB_Info GB (_bind2nd_tran__bclr_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lxor_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// Kernel-selection macros for the LXOR (bool) operator; consumed by the
// template files included by the functions below.
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lxor_bool
// A.*B function (eWiseMult): GB_AemultB__lxor_bool
// A*D function (colscale): GB_AxD__lxor_bool
// D*A function (rowscale): GB_DxB__lxor_bool
// C+=B function (dense accum): GB_Cdense_accumB__lxor_bool
// C+=b function (dense accum): GB_Cdense_accumb__lxor_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_bool
// C=scalar+B GB_bind1st__lxor_bool
// C=scalar+B' GB_bind1st_tran__lxor_bool
// C=A+scalar GB_bind2nd__lxor_bool
// C=A'+scalar GB_bind2nd_tran__lxor_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: logical xor expressed as inequality of bools
#define GB_BINOP(z, x, y) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// NOTE(review): "(none)" is not a valid identifier; presumably this macro is
// only tested for existence by the generator, never expanded -- confirm.
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_BOOL || GxB_NO_LXOR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): LXOR is not in the op set listed below.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// "+" here is the LXOR binop: C(i,j) = (A(i,j) != B(i,j)).
GrB_Info GB_Cdense_ewise3_noaccum__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// kfirst/klast/pstart slices partition B's entries across ntasks tasks.
GrB_Info GB_Cdense_accumB__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__lxor_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns (generated code).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// A_is_pattern / D_is_pattern tell the template to ignore the values of
// that input when true.
GrB_Info GB_AxD__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// C_to_M/A/B map C's vectors to those of M, A, B; TaskList partitions the
// work into ntasks parallel tasks.
GrB_Info GB_AaddB__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for all p: lxor with the scalar bound first.
GrB_Info GB_bind1st__lxor_bool
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cz = (bool *) Cx_output ;
const bool xval = (*((bool *) x_input)) ;
bool *Bvals = (bool *) Bx_input ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
Cz [p] = (xval != Bvals [p]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for all p: lxor with the scalar bound second.
GrB_Info GB_bind2nd__lxor_bool
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cz = (bool *) Cx_output ;
bool *Avals = (bool *) Ax_input ;
const bool yval = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
Cz [p] = (Avals [p] != yval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB_bind1st_tran__lxor_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): the redefinition below is not runtime code; it restores
// GB_ATYPE at the preprocessor level for any code that follows.
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB_bind2nd_tran__lxor_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fftw.c |
/*----------------------------------------------------------------*/
#include "hclfftw.h"
#include <sys/time.h>
#include <omp.h>
/*----------------------------------------------------------------*/
fftw_plan fftw1d_init_plan(
const int sign,
const int m, const int n,
fftw_complex* X, fftw_complex* Y
)
{
/*
 * Build a batched plan: 'm' contiguous 1D row FFTs, each of length 'n'.
 */
int len[1] = {n};
return fftw_plan_many_dft(
    1,          /* rank: 1-D transforms              */
    len, m,     /* each of the m transforms is len n */
    X, len,     /* input, inembed = len              */
    1, n,       /* istride 1, idist n (rows packed)  */
    Y, len,     /* output, onembed = len             */
    1, n,       /* ostride 1, odist n                */
    sign, FFTW_ESTIMATE);
}
/*----------------------------------------------------------------*/
int
fftw2dlocal(
const int sign,
const unsigned int* m, const int n,
fftw_complex* lMatrix,
double* tGroups
)
{
/* Run *m in-place row FFTs of length n; add the elapsed time to tGroups[0]. */
const double t0 = omp_get_wtime();
fftw_plan p = fftw1d_init_plan(sign, (int) *m, n, lMatrix, lMatrix);
fftw_execute(p);
fftw_destroy_plan(p);
tGroups[0] += omp_get_wtime() - t0;
return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal2(
const int sign,
const unsigned int* m, const int n,
const int nThreadsPerGroup,
fftw_complex* lMatrix,
double* tGroups
)
{
/*
 * Two thread groups: group g executes m[g] in-place row FFTs of length n
 * over its contiguous slice of lMatrix.  Plan-creation time (etime) is
 * charged to every group, as in the original accounting; tGroups[g]
 * accumulates etime + that group's execution time.
 *
 * Fixes: per-group offsets are accumulated in size_t (the original
 * computed m[0]*n in 32-bit unsigned arithmetic, which can overflow for
 * large matrices), and the duplicated per-plan/per-section code is
 * folded into loops.
 */
enum { NG = 2 };
fftw_plan plans[NG];
double etime, tstart, tend;
struct timeval start, end;
gettimeofday(&start, NULL);
size_t offset = 0;
for (int g = 0; g < NG; g++)
{
    plans[g] = fftw1d_init_plan(sign, (int) m[g], n,
                                lMatrix + offset, lMatrix + offset);
    offset += (size_t) m[g] * (size_t) n;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
etime = (tend - tstart);
gettimeofday(&start, NULL);
printf("g 2, t %d: Plan creation time %f\n",
nThreadsPerGroup, etime);
#pragma omp parallel for num_threads(NG) schedule(static)
for (int g = 0; g < NG; g++)
{
    double ts = omp_get_wtime();
    fftw_execute(plans[g]);
    fftw_destroy_plan(plans[g]);
    tGroups[g] += etime + omp_get_wtime() - ts;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
printf("g 2, t %d: Plan execution time %f\n",
nThreadsPerGroup, (tend - tstart));
return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal4(
const int sign,
const unsigned int* m, const int n,
const int nThreadsPerGroup,
fftw_complex* lMatrix,
double* tGroups
)
{
/*
 * Four thread groups: group g executes m[g] in-place row FFTs of length n
 * over its contiguous slice of lMatrix.  Plan-creation time (etime) is
 * charged to every group, as in the original accounting.
 *
 * Fixes: per-group offsets accumulated in size_t (the original summed
 * m[0]+m[1]+... times n in 32-bit unsigned arithmetic, which can overflow
 * for large matrices); the 4x-duplicated plan/section code is folded
 * into loops.
 */
enum { NG = 4 };
fftw_plan plans[NG];
double etime, tstart, tend;
struct timeval start, end;
gettimeofday(&start, NULL);
size_t offset = 0;
for (int g = 0; g < NG; g++)
{
    plans[g] = fftw1d_init_plan(sign, (int) m[g], n,
                                lMatrix + offset, lMatrix + offset);
    offset += (size_t) m[g] * (size_t) n;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
etime = (tend - tstart);
gettimeofday(&start, NULL);
printf("g 4, t %d: Plan creation time %f\n",
nThreadsPerGroup, etime);
#pragma omp parallel for num_threads(NG) schedule(static)
for (int g = 0; g < NG; g++)
{
    double ts = omp_get_wtime();
    fftw_execute(plans[g]);
    fftw_destroy_plan(plans[g]);
    tGroups[g] += etime + omp_get_wtime() - ts;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
printf("g 4, t %d: Plan execution time %f\n",
nThreadsPerGroup, (tend - tstart));
return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal5(
const int sign,
const unsigned int* m, const int n,
const int nThreadsPerGroup,
fftw_complex* lMatrix,
double* tGroups
)
{
/*
 * Five thread groups: group g executes m[g] in-place row FFTs of length n
 * over its contiguous slice of lMatrix.  Plan-creation time (etime) is
 * charged to every group, as in the original accounting.
 *
 * Fixes: per-group offsets accumulated in size_t (avoids 32-bit unsigned
 * overflow in the original offset sums); duplicated plan/section code
 * folded into loops.
 */
enum { NG = 5 };
fftw_plan plans[NG];
double etime, tstart, tend;
struct timeval start, end;
gettimeofday(&start, NULL);
size_t offset = 0;
for (int g = 0; g < NG; g++)
{
    plans[g] = fftw1d_init_plan(sign, (int) m[g], n,
                                lMatrix + offset, lMatrix + offset);
    offset += (size_t) m[g] * (size_t) n;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
etime = (tend - tstart);
gettimeofday(&start, NULL);
printf("g 5, t %d: Plan creation time %f\n",
nThreadsPerGroup, etime);
#pragma omp parallel for num_threads(NG) schedule(static)
for (int g = 0; g < NG; g++)
{
    double ts = omp_get_wtime();
    fftw_execute(plans[g]);
    fftw_destroy_plan(plans[g]);
    tGroups[g] += etime + omp_get_wtime() - ts;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
printf("g 5, t %d: Plan execution time %f\n",
nThreadsPerGroup, (tend - tstart));
return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal10(
const int sign,
const unsigned int* m, const int n,
const int nThreadsPerGroup,
fftw_complex* lMatrix,
double* tGroups
)
{
/*
 * Ten thread groups: group g executes m[g] in-place row FFTs of length n
 * over its contiguous slice of lMatrix.  Plan-creation time (etime) is
 * charged to every group, as in the original accounting.
 *
 * Fixes: per-group offsets accumulated in size_t (avoids 32-bit unsigned
 * overflow in the original m[0]+...+m[k] times n sums); the 10x-duplicated
 * plan/section code is folded into loops.
 */
enum { NG = 10 };
fftw_plan plans[NG];
double etime, tstart, tend;
struct timeval start, end;
gettimeofday(&start, NULL);
size_t offset = 0;
for (int g = 0; g < NG; g++)
{
    plans[g] = fftw1d_init_plan(sign, (int) m[g], n,
                                lMatrix + offset, lMatrix + offset);
    offset += (size_t) m[g] * (size_t) n;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
etime = (tend - tstart);
gettimeofday(&start, NULL);
printf("g 10, t %d: Plan creation time %f\n",
nThreadsPerGroup, etime);
#pragma omp parallel for num_threads(NG) schedule(static)
for (int g = 0; g < NG; g++)
{
    double ts = omp_get_wtime();
    fftw_execute(plans[g]);
    fftw_destroy_plan(plans[g]);
    tGroups[g] += etime + omp_get_wtime() - ts;
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
printf("g 10, t %d: Plan execution time %f\n",
nThreadsPerGroup, (tend - tstart));
return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal20(
const int sign,
const unsigned int* m, const int n,
const int nThreadsPerGroup,
fftw_complex* lMatrix,
double* tGroups
)
{
double etime, tstart, tend;
struct timeval start, end;
gettimeofday(&start, NULL);
fftw_plan plan1 = fftw1d_init_plan(
sign, m[0], n,
lMatrix, lMatrix);
fftw_plan plan2 = fftw1d_init_plan(
sign, m[1], n,
lMatrix + m[0]*n,
lMatrix + m[0]*n);
fftw_plan plan3 = fftw1d_init_plan(
sign, m[2], n,
lMatrix + (m[0]+m[1])*n,
lMatrix + (m[0]+m[1])*n);
fftw_plan plan4 = fftw1d_init_plan(
sign, m[3], n,
lMatrix + (m[0]+m[1]+m[2])*n,
lMatrix + (m[0]+m[1]+m[2])*n);
fftw_plan plan5 = fftw1d_init_plan(
sign, m[4], n,
lMatrix + (m[0]+m[1]+m[2]+m[3])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3])*n);
fftw_plan plan6 = fftw1d_init_plan(
sign, m[5], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4])*n);
fftw_plan plan7 = fftw1d_init_plan(
sign, m[6], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5])*n);
fftw_plan plan8 = fftw1d_init_plan(
sign, m[7], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6])*n);
fftw_plan plan9 = fftw1d_init_plan(
sign, m[8], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7])*n);
fftw_plan plan10 = fftw1d_init_plan(
sign, m[9], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8])*n);
fftw_plan plan11 = fftw1d_init_plan(
sign, m[10], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9])*n);
fftw_plan plan12 = fftw1d_init_plan(
sign, m[11], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10])*n);
fftw_plan plan13 = fftw1d_init_plan(
sign, m[12], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11])*n);
fftw_plan plan14 = fftw1d_init_plan(
sign, m[13], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12])*n);
fftw_plan plan15 = fftw1d_init_plan(
sign, m[14], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13])*n);
fftw_plan plan16 = fftw1d_init_plan(
sign, m[15], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14])*n);
fftw_plan plan17 = fftw1d_init_plan(
sign, m[16], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15])*n);
fftw_plan plan18 = fftw1d_init_plan(
sign, m[17], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16])*n);
fftw_plan plan19 = fftw1d_init_plan(
sign, m[18], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17])*n);
fftw_plan plan20 = fftw1d_init_plan(
sign, m[19], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17]+m[18])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17]+m[18])*n);
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
etime = (tend - tstart);
gettimeofday(&start, NULL);
printf("g 20, t %d: Plan creation time %f\n",
nThreadsPerGroup, etime);
#pragma omp parallel sections num_threads(20)
{
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan1);
fftw_destroy_plan(plan1);
tGroups[0] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan2);
fftw_destroy_plan(plan2);
tGroups[1] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan3);
fftw_destroy_plan(plan3);
tGroups[2] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan4);
fftw_destroy_plan(plan4);
tGroups[3] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan5);
fftw_destroy_plan(plan5);
tGroups[4] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan6);
fftw_destroy_plan(plan6);
tGroups[5] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan7);
fftw_destroy_plan(plan7);
tGroups[6] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan8);
fftw_destroy_plan(plan8);
tGroups[7] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan9);
fftw_destroy_plan(plan9);
tGroups[8] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan10);
fftw_destroy_plan(plan10);
tGroups[9] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan11);
fftw_destroy_plan(plan11);
tGroups[10] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan12);
fftw_destroy_plan(plan12);
tGroups[11] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan13);
fftw_destroy_plan(plan13);
tGroups[12] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan14);
fftw_destroy_plan(plan14);
tGroups[13] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan15);
fftw_destroy_plan(plan15);
tGroups[14] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan16);
fftw_destroy_plan(plan16);
tGroups[15] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan17);
fftw_destroy_plan(plan17);
tGroups[16] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan18);
fftw_destroy_plan(plan18);
tGroups[17] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan19);
fftw_destroy_plan(plan19);
tGroups[18] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan20);
fftw_destroy_plan(plan20);
tGroups[19] += etime + omp_get_wtime() - ts;
}
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
printf("g 20, t %d: Plan execution time %f\n",
nThreadsPerGroup, (tend - tstart));
return 0;
}
/*----------------------------------------------------------------*/
/* Dispatch a local 2-D FFT to the variant compiled for the requested
 * number of thread groups.  Group counts other than {1,2,4,5,10,20}
 * are intentionally a no-op, exactly like the original if-chain.
 * Always returns 0. */
int fftwlocal(
    const int sign,
    const unsigned int* m,
    const int n,
    const int nThreadsPerGroup,
    const int nThreadGroups,
    fftw_complex *lMatrix,
    double* tGroups
    )
{
    switch (nThreadGroups)
    {
    case 1:
        /* single group: thread count is implicit in the variant */
        fftw2dlocal(sign, m, n, lMatrix, tGroups);
        break;
    case 2:
        fftw2dlocal2(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
        break;
    case 4:
        fftw2dlocal4(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
        break;
    case 5:
        fftw2dlocal5(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
        break;
    case 10:
        fftw2dlocal10(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
        break;
    case 20:
        fftw2dlocal20(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
        break;
    default:
        /* unsupported group count: nothing to do (matches original behavior) */
        break;
    }
    return 0;
}
/*----------------------------------------------------------------*/
|
Example_declare_variant.1.c | /*
* @@name: declare_variant.1c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#define N 100
#include <stdio.h>
#include <omp.h>
void p_vxv(int *v1,int *v2,int *v3,int n);
void t_vxv(int *v1,int *v2,int *v3,int n);
#pragma omp declare variant( p_vxv ) match( construct={parallel} )
#pragma omp declare variant( t_vxv ) match( construct={target} )
/* Base function: element-wise product v3 = v1 * v2.
 * Under a parallel construct the p_vxv variant is substituted,
 * under a target construct the t_vxv variant is substituted. */
void vxv(int *v1,int *v2,int *v3,int n) // base function
{
   int k;
   for (k = 0; k < n; k++)
   {
      v3[k] = v1[k] * v2[k];
   }
}
/* Parallel-construct variant of vxv: v3 = 3 * v1 * v2, with the loop
 * iterations worksharing-distributed over the enclosing parallel team. */
void p_vxv(int *v1,int *v2,int *v3,int n) // function variant
{
   #pragma omp for
   for (int idx = 0; idx < n; idx++)
   {
      v3[idx] = 3 * v1[idx] * v2[idx];
   }
}
#pragma omp declare target
/* Target-construct variant of vxv: v3 = 2 * v1 * v2, compiled for the
 * device and vectorized/distributed across teams. */
void t_vxv(int *v1,int *v2,int *v3,int n) // function variant
{
   #pragma omp distribute simd
   for (int idx = 0; idx < n; idx++)
   {
      v3[idx] = 2 * v1[idx] * v2[idx];
   }
}
#pragma omp end declare target
/* Driver: calls vxv() from three different contexts so that each of the
 * three declare-variant substitutions (parallel, target, base) is exercised
 * and its distinct scale factor is visible in the printed output. */
int main()
{
   int v1[N], v2[N], v3[N];

   /* init: v1 = 1..N, v2 = -(1..N), v3 = 0 */
   for (int i = 0; i < N; i++)
   {
      v1[i] = i + 1;
      v2[i] = -(i + 1);
      v3[i] = 0;
   }

   #pragma omp parallel
   {
      vxv(v1, v2, v3, N);              /* dispatches to p_vxv */
   }
   printf(" %d %d\n",v3[0],v3[N-1]);   /* from p_vxv -- output: -3 -30000 */

   #pragma omp target teams map(to: v1[:N],v2[:N]) map(from: v3[:N])
   {
      vxv(v1, v2, v3, N);              /* dispatches to t_vxv */
   }
   printf(" %d %d\n",v3[0],v3[N-1]);   /* from t_vxv -- output: -2 -20000 */

   vxv(v1, v2, v3, N);                 /* base function */
   printf(" %d %d\n",v3[0],v3[N-1]);   /* from vxv -- output: -1 -10000 */

   return 0;
}
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object: delegates all sub-strategy
/// construction to InitializeStrategy().
/// @param rModelPart    model part holding the fluid mesh
/// @param rSolverConfig settings bundle providing solvers and tolerances
TwoStepVPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
/// Constructor wiring the two auxiliary Gauss-Seidel linear strategies:
/// one for the momentum (velocity) system and one for the continuity
/// (pressure) system.
/// @param rModelPart            model part holding the fluid mesh
/// @param pVelocityLinearSolver linear solver for the momentum system
/// @param pPressureLinearSolver linear solver for the continuity system
/// @param ReformDofSet          rebuild DofSet each step (handled by this strategy, not the sub-strategies)
/// @param VelTol                velocity convergence tolerance
/// @param PresTol               pressure convergence tolerance
/// @param MaxPressureIterations max outer predictor-corrector iterations
/// @param TimeOrder             1 = backward Euler, 2 = BDF2
/// @param DomainSize            spatial dimension (2 or 3)
TwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy; auxiliary strategies must not modify the DofSet directly.
// Additional Typedefs
//typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent;
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
// Both sub-problems share a static incremental-update scheme.
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY: elimination builder + Gauss-Seidel linear strategy
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
// CONSTRUCTION OF PRESSURE: block builder + Gauss-Seidel linear strategy
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor. Sub-strategies are held through smart pointers and are
/// released automatically; nothing to clean up explicitly.
virtual ~TwoStepVPStrategy() {}
/// Validate the configuration before solving.
/// Checks base-class invariants, variable-key registration, a buffer size
/// sufficient for the chosen time order, and each element's own Check().
/// @return 0 on success, the first non-zero element/base error code otherwise.
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
// A zero key means the variable was never registered by the application.
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
// BDF2 needs 3 history steps; backward Euler needs 2.
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// Stop at the first element that fails its own consistency check.
for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
{
ierr = itEl->Check(rCurrentProcessInfo);
if (ierr != 0)
break;
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/* ierr = itCond->Check(rCurrentProcessInfo); */
/* if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
/// Outer velocity-pressure iteration for one time step.
/// Alternates momentum and continuity solves (with a topology update in
/// between) until both converge (and at least 3 iterations were done) or
/// the iteration budget is exhausted.
/// NOTE(review): the returned NormDp is never assigned after its
/// initialization, so this method always returns 0.0 — confirm callers do
/// not rely on the pressure norm.
double Solve() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
double NormDp = 0.0;
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("TwoStepVPStrategy") << "\n Solve with two_step_vp strategy at t=" << currentTime << "s" << std::endl;
// Grant extra iterations after a time-step-size change and during the
// initial transient (x3 for the first 10 steps, x2 for the next 10).
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
// *AlreadyConverged flags record (once) the iteration at which each
// sub-problem first converged; only used by the commented-out logging.
bool momentumAlreadyConverged = false;
bool continuityAlreadyConverged = false;
/* boost::timer solve_step_time; */
// Iterative solution for pressure
/* unsigned int timeStep = rCurrentProcessInfo[STEP]; */
/* if(timeStep==1){ */
/* unsigned int iter=0; */
/* continuityConverged = this->SolveContinuityIteration(iter,maxNonLinearIterations); */
/* }else if(timeStep==2){ */
/* unsigned int iter=0; */
/* momentumConverged = this->SolveMomentumIteration(iter,maxNonLinearIterations,fixedTimeStep); */
/* }else{ */
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
// 1) momentum solve, then move the mesh to the updated configuration
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
if ((momentumConverged == true || it == maxNonLinearIterations - 1) && momentumAlreadyConverged == false)
{
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
momentumAlreadyConverged = true;
}
if ((continuityConverged == true || it == maxNonLinearIterations - 1) && continuityAlreadyConverged == false)
{
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
continuityAlreadyConverged = true;
}
// 2) continuity solve, skipped when the time step was frozen by the
// momentum convergence check
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations);
}
// Final (or converged, after at least 3 iterations) pass: update the
// element stress/strain state before leaving the loop.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
{
//this->ComputeErrorL2Norm();
//this->ComputeErrorL2NormCasePoiseuille();
this->UpdateStressStrain();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
if ((continuityConverged && momentumConverged) && it > 2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
/* } */
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
if (mReformDofSet)
this->Clear();
return NormDp;
}
/// Finalize the solution step. Intentionally a no-op: the stress/strain
/// update is performed inside Solve() (see UpdateStressStrain) instead.
void FinalizeSolutionStep() override
{
/* this->UpdateStressStrain(); */
}
/// Initialize the solution step. Intentionally empty: the auxiliary
/// strategies call their own InitializeSolutionStep() on the first
/// iteration inside SolveMomentumIteration / SolveContinuityIteration.
void InitializeSolutionStep() override
{
}
/// Recompute nodal displacements from the latest velocities and move the mesh.
/// NOTE(review): rModelPart and echoLevel are only referenced by the
/// commented-out boundary-normal recomputation below.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
/// Per-node update of the time-derivative variables: accelerations from the
/// BDF velocity history, then pressure velocity/acceleration by finite
/// differences. Isolated non-solid nodes get their pressure history reset.
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Regular fluid/solid node: acceleration from the BDF velocity history.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
// Rigid (non-solid) node: zero out the acceleration history.
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
// Isolated node: reset the pressure history and restart the motion
// from the body-force (volume) acceleration, if available.
else
{
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
// Statement order matters here: the old pressure velocity is folded
// into the acceleration BEFORE it is overwritten, yielding
// d2p/dt2 ~ (dp/dt_new - dp/dt_old) / dt.
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
/// Per-node acceleration update from the BDF velocity history.
/// NOTE(review): this duplicates the acceleration part of
/// CalculateTemporalVariables() (without the pressure-derivative update);
/// keep the two flag conditions in sync if either changes.
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Regular fluid/solid node: acceleration from the BDF velocity history.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
// Rigid (non-solid) node: zero out the acceleration history.
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
// Isolated node: reset the pressure history and restart the motion
// from the body-force (volume) acceleration, if available.
else
{
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
/// In-place acceleration update from the velocity increment:
///   a^{n+1} = -BDFcoeffs[1] * (v^{n+1} - v^n) - a^n
/// With the BDF2 coefficients set by SetTimeCoefficients, BDFcoeffs[1] is
/// negative (~ -2/dt for constant dt), so this is presumably the
/// trapezoidal-rule inversion a^{n+1} = 2 dv/dt - a^n — TODO confirm.
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
const array_1d<double, 3> &CurrentVelocity,
array_1d<double, 3> &PreviousAcceleration,
const array_1d<double, 3> &PreviousVelocity,
Vector &BDFcoeffs)
{
/* noalias(PreviousAcceleration)=CurrentAcceleration; */
noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
// std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t)
// std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t)
// std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t)
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
/// Refresh the element stress/strain state, then recompute the nodal
/// time-derivative variables.
void UpdateStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// Each OpenMP thread gets a contiguous slice of the element container.
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
/* this->CalculateAccelerations(); */
/* this->CalculatePressureVelocity(); */
/* this->CalculatePressureAcceleration(); */
this->CalculateTemporalVariables();
}
/// Release the internal data (system matrices/vectors, DofSet) of both
/// auxiliary strategies; called after each step when mReformDofSet is set.
void Clear() override
{
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
/// Set the verbosity of this strategy; the auxiliary strategies are kept
/// one level quieter (never below 0).
void SetEchoLevel(int Level) override
{
BaseType::SetEchoLevel(Level);
const int SubStrategyLevel = (Level > 0) ? (Level - 1) : 0;
mpMomentumStrategy->SetEchoLevel(SubStrategyLevel);
mpPressureStrategy->SetEchoLevel(SubStrategyLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "TwoStepVPStrategy";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "TwoStepVPStrategy";
}
/// Print object's data. Intentionally empty: this strategy exposes no
/// additional printable state beyond Info()/PrintInfo().
void PrintData(std::ostream &rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* Fills BDF_COEFFICIENTS in the process info for the configured time order:
* variable-step BDF2 (3 coefficients) or backward Euler (2 coefficients).
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
KRATOS_TRY;
if (mTimeOrder == 2)
{
//calculate the variable-step BDF2 coefficients; Rho is the ratio of the
//previous step size to the current one (Rho == 1 for constant dt)
double Dt = rCurrentProcessInfo[DELTA_TIME];
double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
double Rho = OldDt / Dt;
double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3, false);
BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
}
else if (mTimeOrder == 1)
{
//first-order backward Euler coefficients
double Dt = rCurrentProcessInfo[DELTA_TIME];
double TimeCoeff = 1.0 / Dt;
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(2, false);
BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt)
BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
}
KRATOS_CATCH("");
}
/// One momentum (velocity) iteration: build and solve the momentum system,
/// then check velocity convergence.
/// @param it            current outer-iteration index (0-based)
/// @param maxIt         total outer-iteration budget
/// @param fixedTimeStep [out] set when the time step is frozen by the
///                      convergence checks (FixTimeStepMomentum /
///                      CheckMomentumConvergence)
/// @return true when the velocity increment met the tolerance
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep = false;
// build momentum system and solve for fractional step velocity increment
// (FRACTIONAL_STEP == 1 selects the momentum contribution in the elements)
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
/* std::cout<<"---- m o m e n t u m e q u a t i o n s ----"<<std::endl; */
if (it == 0)
{
mpMomentumStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDv = mpMomentumStrategy->Solve(); */
/* } */
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
double DvErrorNorm = 0;
ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
// Only start the stagnation check after a few iterations.
unsigned int iterationForCheck = 3;
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Final Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
}
else if (it > iterationForCheck)
{
fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
}
// ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// double currentTime = rCurrentProcessInfo[TIME];
// double tolerance=0.0000000001;
// if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
/// One continuity (pressure) iteration: build and solve the pressure system,
/// then check pressure convergence.
/// @param it    current outer-iteration index (0-based)
/// @param maxIt total outer-iteration budget
/// @return true when the pressure increment met the tolerance; on the last
///         iteration the result is overridden by FixTimeStepContinuity
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
double NormDp = 0;
// 2. Pressure solution
// (FRACTIONAL_STEP == 5 selects the continuity contribution in the elements)
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
/* std::cout<<" ---- c o n t i n u i t y e q u a t i o n ----"<<std::endl; */
if (it == 0)
{
mpPressureStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDp = mpPressureStrategy->Solve(); */
/* } */
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
double DpErrorNorm = 0;
ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);
KRATOS_INFO("TwoStepVPStrategy") << " iteration(" << it << ") Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("TwoStepVPStrategy") << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
}
// ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// double currentTime = rCurrentProcessInfo[TIME];
// double tolerance=0.0000000001;
// if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
/// Computes per-element L2 error norms of velocity, pressure and deviatoric
/// stress against a manufactured polynomial analytical solution, and appends
/// each norm, tagged with the current simulation time, to a text file.
void ComputeErrorL2Norm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    long double sumErrorL2Velocity = 0; // never accumulated below; written out as 0
    long double sumErrorL2VelocityX = 0;
    long double sumErrorL2VelocityY = 0;
    long double sumErrorL2Pressure = 0;
    long double sumErrorL2TauXX = 0;
    long double sumErrorL2TauYY = 0;
    long double sumErrorL2TauXY = 0;
    // BUGFIX: the accumulators are shared between threads, so they must be
    // combined with a reduction clause; the previous version raced on them.
#pragma omp parallel reduction(+ : sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            // Interpolate pressure and velocity at the single Gauss point.
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter (assumes triangles: average of the 3 nodes).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Analytical velocity/pressure evaluated at the barycenter.
            const long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            const long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            const long double expectedPressure = -posX * (1.0 - posX);
            const long double eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
            const long double eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
            const long double eleErrorL2Pressure = elementalPressure - expectedPressure;
            sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
            sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
            sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();
            // Deviatoric stresses are currently hard-wired to zero (the
            // elemental lookups are commented out).
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            const long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
            const long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
            const long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
            const long double nodalErrorTauXX = tauXX - expectedTauXX;
            const long double nodalErrorTauYY = tauYY - expectedTauYY;
            const long double nodalErrorTauXY = tauXY - expectedTauXY;
            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
        }
    }
    // Append "<time>\t<norm>" to the given file.
    auto appendNorm = [currentTime](const char *fileName, long double value) {
        std::ofstream f(fileName, std::ios::app);
        f << currentTime << "\t" << value << "\n";
    };
    appendNorm("errorL2VelocityFile.txt", sqrt(sumErrorL2Velocity));
    appendNorm("errorL2VelocityXFile.txt", sqrt(sumErrorL2VelocityX));
    appendNorm("errorL2VelocityYFile.txt", sqrt(sumErrorL2VelocityY));
    appendNorm("errorL2PressureFile.txt", sqrt(sumErrorL2Pressure));
    appendNorm("errorL2TauXXFile.txt", sqrt(sumErrorL2TauXX));
    appendNorm("errorL2TauYYFile.txt", sqrt(sumErrorL2TauYY));
    appendNorm("errorL2TauXYFile.txt", sqrt(sumErrorL2TauXY));
}
/// Computes L2 error norms of the azimuthal velocity and shear stress against
/// the analytical circular-Couette-type solution (inner radius r_in, outer
/// radius R_out, angular velocity omega) and appends them to a text file.
void ComputeErrorL2NormCasePoiseuille()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;
    // Geometry/material parameters of the analytical benchmark.
    const double r_in = 0.2;
    const double R_out = 0.5;
    const double kappa = r_in / R_out;
    const double omega = 0.5;
    const double viscosity = 100.0;
    // BUGFIX: the accumulators are shared between threads, so they must be
    // combined with a reduction clause; the previous version raced on them.
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            // Interpolate the velocity at the single Gauss point.
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter (assumes triangles: average of the 3 nodes).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Polar coordinates / rotation terms at the barycenter.
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            // Analytical azimuthal velocity and shear stress.
            const double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            const double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            const double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
            // Deviatoric stresses are currently hard-wired to zero (the
            // elemental lookups are commented out).
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            const double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            const double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            const double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
        }
    }
    const double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    const double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}
/// Computes the relative velocity error ||dv|| / ||v|| over all nodes.
///
/// @param NormDv      Norm of the velocity increment (input).
/// @param errorNormDv Output: relative error (NormDv divided by the global
///                    velocity norm; the norm is clamped to 1 when zero to
///                    avoid division by zero).
/// @return true if the relative error is below mVelocityTolerance.
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
    errorNormDv = 0;
#pragma omp parallel reduction(+ : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            // Accumulate the squared nodal velocity components.
            // (The original per-node partial norm was computed but never
            // used, so it has been removed.)
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // Combine partial norms across MPI ranks.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    if (NormV == 0.0)
        NormV = 1.00;
    errorNormDv = NormDv / NormV;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
    }
    return (errorNormDv < mVelocityTolerance);
}
bool CheckPressureConvergence(const double NormDp, double &errorNormDp)
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormP = 0.00;
errorNormDp = 0;
#pragma omp parallel reduction(+ \
: NormP)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
if (NormP == 0.0)
NormP = 1.00;
errorNormDp = NormDp / NormP;
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
std::cout << " The norm of pressure is: " << NormP << std::endl;
std::cout << " Pressure error: " << errorNormDp << std::endl;
}
/* else{ */
/* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */
/* } */
if (errorNormDp < mPressureTolerance)
{
return true;
}
else
return false;
}
/// Checks the momentum iteration for bad convergence at the end of the step
/// and, when the error is far beyond tolerance, rolls velocity, pressure and
/// acceleration back to the previous step's values.
///
/// @param DvErrorNorm Relative velocity error of the last iteration.
/// @return true when the previous fields were restored ("fixed" step).
bool FixTimeStepMomentum(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool fixedTimeStep = false;
    // Be tolerant during the first 10 time steps of the simulation.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // Bad convergence: error above tolerance, NaN or Inf. The original
    // always-false clause (x<0 && x>0) and the redundant x!=x / isnan /
    // isinf checks are folded into !std::isfinite — behavior is unchanged.
    // Errors of exactly 0 or 1 are treated as sentinels, never as failures.
    if ((DvErrorNorm > minTolerance || !std::isfinite(DvErrorNorm)) && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
            fixedTimeStep = true;
            // Roll every node back to the previous step's solution.
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
/// Detects divergence of the momentum iteration during the iterative loop
/// (error close to or above 1, NaN or Inf) and, if detected, rolls velocity,
/// pressure and acceleration back to the previous step's values.
///
/// @param DvErrorNorm Relative velocity error of the current iteration.
/// @return true when the previous fields were restored.
bool CheckMomentumConvergence(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double minTolerance = 0.99999;
    bool fixedTimeStep = false;
    // Bad convergence: error above tolerance, NaN or Inf. The original
    // always-false clause (x<0 && x>0) and the redundant x!=x / isnan /
    // isinf checks are folded into !std::isfinite — behavior is unchanged.
    // Errors of exactly 0 or 1 are treated as sentinels, never as failures.
    if ((DvErrorNorm > minTolerance || !std::isfinite(DvErrorNorm)) && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
        // Roll every node back to the previous step's solution.
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
/// Flags the time step when the continuity (pressure) iteration shows bad
/// convergence; the flag is stored in BAD_PRESSURE_CONVERGENCE.
///
/// @param DvErrorNorm Relative pressure error of the last iteration.
/// @return true when the step is flagged as badly converged.
bool FixTimeStepContinuity(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool fixedTimeStep = false;
    // Be tolerant during the first 10 time steps of the simulation.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // Bad convergence: error above tolerance, NaN or Inf. The original
    // always-false clause (x<0 && x>0) and the redundant x!=x / isnan /
    // isinf checks are folded into !std::isfinite — behavior is unchanged.
    // Errors of exactly 0 or 1 are treated as sentinels, never as failures.
    if ((DvErrorNorm > minTolerance || !std::isfinite(DvErrorNorm)) && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        fixedTimeStep = true;
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// Relative convergence tolerance for the momentum (velocity) iteration.
double mVelocityTolerance;
// Relative convergence tolerance for the continuity (pressure) iteration.
double mPressureTolerance;
// Maximum number of pressure iterations (read from the solver settings).
unsigned int mMaxPressureIter;
// Spatial dimension of the problem, from the solver settings.
unsigned int mDomainSize;
// Order of the time integration scheme, from the solver settings.
unsigned int mTimeOrder;
// If true, the DOF set is rebuilt for each solution step.
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Reads the solver configuration and wires up the momentum and pressure
/// sub-strategies together with their tolerances; throws if either strategy
/// is missing from the settings.
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;
    mTimeOrder = rSolverConfig.GetTimeOrder();
    // Check that input parameters are reasonable and sufficient.
    this->Check();
    //ModelPart& rModelPart = this->GetModelPart();
    mDomainSize = rSolverConfig.GetDomainSize();
    mReformDofSet = rSolverConfig.GetReformDofSet();
    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
    // Initialize strategies for each step
    bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
    if (HaveVelStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
        /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
    }
    bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
    if (HavePressStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
    }
    // Check input parameters
    // NOTE(review): Check() is already called above, before the strategies
    // are configured; this second call re-validates the completed setup —
    // confirm whether both invocations are intended.
    this->Check();
    KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (private: the strategy is intentionally
/// non-assignable; this stub exists only to suppress the compiler-generated
/// one). BUGFIX: the original body fell off the end of a function declared
/// to return a reference, which is undefined behavior if ever invoked —
/// now returns *this.
TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) { return *this; }
/// Copy constructor (private: the strategy is intentionally non-copyable;
/// this stub only suppresses the compiler-generated one).
TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Numbers of clauses.
  const unsigned NumClauses;
  /// \brief Number of child expressions/stmts.
  const unsigned NumChildren;
  /// \brief Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;

  /// \brief Get the clauses storage.
  /// The clause pointers live in trailing storage, ClausesOffset bytes
  /// past the start of the (derived) directive object.
  MutableArrayRef<OMPClause *> getClauses() {
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }

protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses stored after the directive.
  /// \param NumChildren Number of child stmts/exprs (0 when the directive
  /// has no associated statement).
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        ClausesOffset(llvm::RoundUpToAlignment(sizeof(T),
                                               llvm::alignOf<OMPClause *>())) {}

  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// \brief Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    // The associated statement is always the first child.
    *child_begin() = S;
  }

public:
  /// \brief Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only those declarations that meet some run-time
  /// criteria.
  template <class FilterPredicate> class filtered_clause_iterator {
  protected:
    ArrayRef<OMPClause *>::const_iterator Current;
    ArrayRef<OMPClause *>::const_iterator End;
    FilterPredicate Pred;
    /// \brief Advance Current to the next clause accepted by Pred (or End).
    void SkipToNextClause() {
      while (Current != End && !Pred(*Current))
        ++Current;
    }

  public:
    typedef const OMPClause *value_type;
    filtered_clause_iterator() : Current(), End() {}
    filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
        : Current(Arr.begin()), End(Arr.end()), Pred(std::move(Pred)) {
      SkipToNextClause();
    }
    value_type operator*() const { return *Current; }
    value_type operator->() const { return *Current; }
    filtered_clause_iterator &operator++() {
      ++Current;
      SkipToNextClause();
      return *this;
    }
    filtered_clause_iterator operator++(int) {
      filtered_clause_iterator tmp(*this);
      ++(*this);
      return tmp;
    }
    /// \brief Returns true when the iterator is exhausted.
    bool operator!() { return Current == End; }
    explicit operator bool() { return Current != End; }
    bool empty() const { return Current == End; }
  };

  /// \brief Returns an iterator over the clauses accepted by \a fn.
  template <typename Fn>
  filtered_clause_iterator<Fn> getFilteredClauses(Fn &&fn) const {
    return filtered_clause_iterator<Fn>(clauses(), std::move(fn));
  }
  /// \brief Predicate that accepts only clauses of a fixed kind.
  struct ClauseKindFilter {
    OpenMPClauseKind Kind;
    bool operator()(const OMPClause *clause) const {
      return clause->getClauseKind() == Kind;
    }
  };
  /// \brief Returns an iterator over all clauses of kind \a Kind.
  filtered_clause_iterator<ClauseKindFilter>
  getClausesOfKind(OpenMPClauseKind Kind) const {
    return getFilteredClauses(ClauseKindFilter{Kind});
  }

  /// \brief Gets a single clause of the specified kind \a K associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of kind \a K is associated with
  /// the directive.
  const OMPClause *getSingleClause(OpenMPClauseKind K) const;

  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }

  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }

  /// \brief Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }

  /// \brief Returns statement associated with the directive.
  Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return const_cast<Stmt *>(*child_begin());
  }

  /// \brief Returns the kind of this OpenMP directive.
  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!hasAssociatedStmt())
      return child_range();
    // Child stmts are stored immediately after the clause pointers.
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    return child_range(ChildStorage, ChildStorage + NumChildren);
  }

  ArrayRef<OMPClause *> clauses() { return getClauses(); }
  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are nesessary for all the loop directives, and
/// the next 7 are specific to the worksharing ones.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
InitOffset = 6,
IncOffset = 7,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 8,
// The following 7 exprs are used by worksharing loops only.
IsLastIterVariableOffset = 8,
LowerBoundVariableOffset = 9,
UpperBoundVariableOffset = 10,
StrideVariableOffset = 11,
EnsureUpperBoundOffset = 12,
NextLowerBoundOffset = 13,
NextUpperBoundOffset = 14,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for worksharing loop directives.
WorksharingEnd = 15,
};
/// \brief Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
Expr **Storage = reinterpret_cast<Expr **>(
&(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
return isOpenMPWorksharingDirective(Kind) ? WorksharingEnd
: DefaultEnd;
}
/// \brief Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
3 * CollapsedNum; // Counters, Updates and Finals
}
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) {
*std::next(child_begin(), CondOffset) = Cond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
void setIsLastIterVariable(Expr *IL) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setCounters(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// \brief Loop iteration variable.
Expr *IterationVarRef;
/// \brief Loop last iteration number.
Expr *LastIteration;
/// \brief Loop number of iterations.
Expr *NumIterations;
/// \brief Calculation of last iteration.
Expr *CalcLastIteration;
/// \brief Loop pre-condition.
Expr *PreCond;
/// \brief Loop condition.
Expr *Cond;
/// \brief Loop iteration variable init.
Expr *Init;
/// \brief Loop increment.
Expr *Inc;
/// \brief IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// \brief LowerBound - local variable passed to runtime.
Expr *LB;
/// \brief UpperBound - local variable passed to runtime.
Expr *UB;
/// \brief Stride - local variable passed to runtime.
Expr *ST;
/// \brief EnsureUpperBound -- expression LB = min(LB, NumIterations).
Expr *EUB;
/// \brief Update of LowerBound for statically sheduled 'omp for' loops.
Expr *NLB;
/// \brief Update of UpperBound for statically sheduled 'omp for' loops.
Expr *NUB;
/// \brief Counters Loop counters.
SmallVector<Expr *, 4> Counters;
/// \brief Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// \brief Final loop counter values for GodeGen.
SmallVector<Expr *, 4> Finals;
/// \brief Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && Init != nullptr && Inc != nullptr;
}
/// \brief Initialize all the fields to null.
/// \param Size Number of elements in the counters/finals/updates arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
Counters.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
for (unsigned i = 0; i < Size; ++i) {
Counters[i] = nullptr;
Updates[i] = nullptr;
Finals[i] = nullptr;
}
}
};
/// \brief Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }
Expr *getIterationVariable() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IterationVariableOffset)));
}
Expr *getLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LastIterationOffset)));
}
Expr *getCalcLastIteration() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), CalcLastIterationOffset)));
}
Expr *getPreCond() const {
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), PreConditionOffset)));
}
Expr *getCond() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
}
Expr *getInit() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
Expr *getInc() const {
return const_cast<Expr *>(
reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
Expr *getIsLastIterVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), IsLastIterVariableOffset)));
}
Expr *getLowerBoundVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), LowerBoundVariableOffset)));
}
Expr *getUpperBoundVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), UpperBoundVariableOffset)));
}
Expr *getStrideVariable() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), StrideVariableOffset)));
}
Expr *getEnsureUpperBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), EnsureUpperBoundOffset)));
}
Expr *getNextLowerBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextLowerBoundOffset)));
}
Expr *getNextUpperBound() const {
assert(isOpenMPWorksharingDirective(getDirectiveKind()) &&
"expected worksharing loop directive");
return const_cast<Expr *>(reinterpret_cast<const Expr *>(
*std::next(child_begin(), NextUpperBoundOffset)));
}
const Stmt *getBody() const {
// This relies on the loop form is already checked by Sema.
Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
Body = cast<ForStmt>(Body)->getBody();
for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
Body = Body->IgnoreContainers();
Body = cast<ForStmt>(Body)->getBody();
}
return Body;
}
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
return const_cast<OMPLoopDirective *>(this)->getFinals();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt,
                                 const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Name of the directive.
  DeclarationNameInfo DirName;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, 0, 1),
        DirName(Name) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), 0, 1),
        DirName() {}

  /// \brief Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
/// A stand-alone directive: it carries no clauses and no associated statement.
class OMPTaskyieldDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               StartLoc, EndLoc, 0, 0) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
/// A stand-alone directive: it carries no clauses and no associated statement.
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               StartLoc, EndLoc, 0, 0) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
/// A stand-alone directive: it carries no clauses and no associated statement.
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               StartLoc, EndLoc, 0, 0) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               SourceLocation(), SourceLocation(), 0, 0) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
/// Unlike the stand-alone task-synchronization directives, 'taskgroup' has an
/// associated statement but takes no clauses.
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTaskgroupDirective *Create(const ASTContext &C,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc,
                                       Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses, 0) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
/// Has an associated statement but takes no clauses.
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
/// The directive stores 5 child statements; the setters/getters below access
/// slots 1..4 as 'x', the update expression, 'v' and 'expr' respectively
/// (slot 0 presumably holds the associated statement -- see
/// OMPExecutableDirective).
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;

  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// \brief Set 'x' part of the associated expression/statement (child slot 1).
  void setX(Expr *X) { *std::next(child_begin()) = X; }

  /// \brief Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)' (child slot 2).
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }

  /// \brief Set 'v' part of the associated expression/statement (child slot 3).
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }

  /// \brief Set 'expr' part of the associated expression/statement (child
  /// slot 4).
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// \brief Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }

  /// \brief Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }

  /// \brief Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }

  /// \brief Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }

  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }

  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Kind of the region this cancellation point applies to
  /// (OMPD_unknown until set).
  OpenMPDirectiveKind CancelRegion;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc, 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CancelRegion Kind of the region this cancellation point applies to.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief Kind of the region this cancel applies to (OMPD_unknown until
  /// set).
  OpenMPDirectiveKind CancelRegion;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Build an empty directive (used when deserializing the AST).
  ///
  explicit OMPCancelDirective()
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// \brief Set cancel region for the current cancel directive.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CancelRegion Kind of the region this cancel applies to.
  ///
  static OMPCancelDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    OpenMPDirectiveKind CancelRegion);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
} // end namespace clang
#endif
|
EW.c | // SW4 LICENSE
// # ----------------------------------------------------------------------
// # SW4 - Seismic Waves, 4th order
// # ----------------------------------------------------------------------
// # Copyright (c) 2013, Lawrence Livermore National Security, LLC.
// # Produced at the Lawrence Livermore National Laboratory.
// #
// # Written by:
// # N. Anders Petersson (petersson1@llnl.gov)
// # Bjorn Sjogreen (sjogreen2@llnl.gov)
// #
// # LLNL-CODE-643337
// #
// # All rights reserved.
// #
// # This file is part of SW4, Version: 1.0
// #
// # Please also read LICENCE.txt, which contains "Our Notice and GNU General Public License"
// #
// # This program is free software; you can redistribute it and/or modify
// # it under the terms of the GNU General Public License (as published by
// # the Free Software Foundation) version 2, dated June 1991.
// #
// # This program is distributed in the hope that it will be useful, but
// # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
// # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
// # conditions of the GNU General Public License for more details.
// #
// # You should have received a copy of the GNU General Public License
// # along with this program; if not, write to the Free Software
// # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#include "sw4.h"
#include "EW.h"
#include <sstream>
#include <fstream>
#ifdef SW4_OPENMP
#include <omp.h>
#endif
#include <mpi.h>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <algorithm>
#include <cmath>
#include "Source.h"
#include "GridPointSource.h"
#include "CheckPoint.h"
#include "MaterialBlock.h"
#include "TimeSeries.h"
#include "F77_FUNC.h"
#include "EWCuda.h"
extern "C"
{
   // LAPACK routine dspev (symmetric eigenproblem, packed storage); the
   // Fortran symbol name is produced by the F77_FUNC name-mangling macro.
   void F77_FUNC(dspev,DSPEV)(char & JOBZ, char & UPLO, int & N, double *AP, double *W, double *Z, int & LDZ, double *WORK, int & INFO);
}

// Forward declarations of the 4th-order spatial right-hand-side kernels,
// Cartesian ("sg") and curvilinear ("sgcurv") variants.
// NOTE(review): the "_rev" versions appear to differ only in internal
// data layout / loop order -- confirm against their implementations.
void rhs4sg_rev( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
                 int nk, int* onesided, float_sw4* a_acof, float_sw4* a_bope, float_sw4* a_ghcof,
                 float_sw4* a_lu, float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda,
                 float_sw4 h, float_sw4* a_strx, float_sw4* a_stry, float_sw4* a_strz );

void rhs4sg( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
             int nk, int* onesided, float_sw4* a_acof, float_sw4* a_bope, float_sw4* a_ghcof,
             float_sw4* a_lu, float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda,
             float_sw4 h, float_sw4* a_strx, float_sw4* a_stry, float_sw4* a_strz );

void rhs4sgcurv_rev( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
                     float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda, float_sw4* a_met,
                     float_sw4* a_jac, float_sw4* a_lu, int* onesided, float_sw4* acof,
                     float_sw4* bope, float_sw4* ghcof, float_sw4* a_strx, float_sw4* a_stry );

void rhs4sgcurv( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
                 float_sw4* a_u, float_sw4* a_mu, float_sw4* a_lambda, float_sw4* a_met,
                 float_sw4* a_jac, float_sw4* a_lu, int* onesided, float_sw4* acof,
                 float_sw4* bope, float_sw4* ghcof, float_sw4* a_strx, float_sw4* a_stry );
// Construct the solver from an input file and immediately run the whole
// simulation: parse input, set up the run, then execute the time loop.
EW::EW( const string& filename ) :
   // Time-stepping defaults.
   mCFL(1.3),
   mTstart(0.0),
   mTmax(0.0),
   mTimeIsSet(false),
   mNumberOfTimeSteps(-1),
   mPrintInterval(100),
   // Ghost-point / processor-padding defaults.
   m_ghost_points(2),
   m_ext_ghost_points(2),
   m_ppadding(2),
   mVerbose(0),
   mQuiet(false),
   // Supergrid defaults.
   m_supergrid_damping_coefficient(0.02),
   m_sg_damping_order(4),
   m_sg_gp_thickness(20),
   m_use_supergrid(false),
   m_checkfornan(false),
   m_topography_exists(false),
   m_grid_interpolation_order(4),
   m_zetaBreak(0.95),
   m_point_source_test(false),
   mPath("./"),
   m_moment_test(false),
   m_pfs(false),
   m_nwriters(8),
   m_output_detailed_timing(false),
   m_save_trace(false),
   m_ndevice(0),
   m_corder(false),
   // Geographic mapping defaults (lon/lat origin and meters-per-degree).
   mGeoAz(0.0),
   mLonOrigin(-118.0),
   mLatOrigin(37.0),
   mMetersPerDegree(111319.5),
   mMetersPerLongitude(87721.0),
   mConstMetersPerLongitude(false)
{
   // Default GPU thread-block dimensions.
   m_gpu_blocksize[0] = 16;
   m_gpu_blocksize[1] = 16;
   m_gpu_blocksize[2] = 1;

   // Select the MPI datatype that matches the configured precision of
   // float_sw4 (single or double); anything else is a build error.
   if( sizeof(float_sw4) == 4 )
      m_mpifloat = MPI_FLOAT;
   else if( sizeof(float_sw4) == 8 )
      m_mpifloat = MPI_DOUBLE;
   else
      CHECK_INPUT(false,"Error, could not identify float_sw4");

   MPI_Comm_rank( MPI_COMM_WORLD, &m_myrank );
   MPI_Comm_size( MPI_COMM_WORLD, &m_nprocs );
   m_restart_check_point = CheckPoint::nil;

   // Drive the full run from the constructor.
   parseInputFile( filename );
   setupRun( );
   timesteploop( mU, mUm );
}
//-----------------------------------------------------------------------
// Number of grid points needed to span [0,maxval] with spacing h.
// If (n-1)*h hits maxval within a small tolerance, n points suffice;
// otherwise one extra point is added to cover the whole interval.
int EW::computeEndGridPoint( float_sw4 maxval, float_sw4 h )
{
   const float_sw4 rel_tol = 1e-5;
   const float_sw4 abs_tol = 1e-12;
   float_sw4 candidate = round( maxval/h + 1 );
   bool lands_on_end =
      fabs( (candidate-1)*h - maxval ) < rel_tol*fabs(maxval) + abs_tol;
   int npts = static_cast<int>(candidate);
   if( !lands_on_end )
      npts = npts + 1;
   return npts;
}
//-----------------------------------------------------------------------
// Return true if 'line' starts with the string 'begin', ignoring any
// leading spaces and tabs in 'line'.
bool EW::startswith(const char begin[], char *line)
{
   // Skip leading blanks and tabs.
   while( *line == ' ' || *line == '\t' )
      line++;
   return strncmp(begin, line, strlen(begin)) == 0;
}
//-----------------------------------------------------------------------
// Warn (on rank 0 only) about an unrecognized option on an input line.
void EW::badOption(string name, char* option) const
{
   if( m_myrank == 0 )
   {
      cout << "\tWarning: ignoring " << name << " line option '" << option
           << "'" << endl;
   }
}
//-----------------------------------------------------------------------
void EW::processGrid( char* buffer )
{
float_sw4 x = 0.0, y=0.0, z=0.0, h=0.0;
int nx=0, ny=0, nz=0;
stringstream gridSetupErrStream;
gridSetupErrStream << endl
<< "----------------------------------------" << endl
<< " Only five ways to setup grid: " << endl
<< " 1. provide h and nx, ny, nz " << endl
<< " 2. provide h and x, y, z " << endl
<< " 3. provide x,y,z and nx " << endl
<< " 4. provide x,y,z and ny " << endl
<< " 5. provide x,y,z and nz " << endl
<< "----------------------------------------" << endl
<< endl;
string gridSetupErr = gridSetupErrStream.str();
char* token = strtok(buffer, " \t");
token = strtok(NULL, " \t");
string err = "ERROR in ProcessGrid: ";
if( m_myrank == 0 )
cout << endl << "* Processing the grid command..." << endl;
while (token != NULL)
{
// while there are tokens in the string still
if (startswith("#", token) || startswith(" ", buffer))
// Ignore commented lines and lines with just a space.
break;
if (startswith("ny=", token))
{
token += 3;
CHECK_INPUT(atoi(token) > 0,
err << "ny is not a positive integer: " << token);
ny = atoi(token);
}
else if (startswith("nx=", token))
{
token += 3;
CHECK_INPUT(atoi(token) > 0,
err << "nx is not a positive integer: " << token);
nx = atoi(token);
}
else if (startswith("nz=", token))
{
token += 3;
CHECK_INPUT(atoi(token) >= 0,
err << "nz is not a positive integer: " << token);
nz = atoi(token);
}
else if (startswith("x=", token))
{
token += 2;
CHECK_INPUT(atof(token) > 0.0, err << "x is not a positive float: " << token);
x = atof(token);
}
else if (startswith("y=", token))
{
token += 2;
CHECK_INPUT(atof(token) >= 0.0, err << "y is negative: " << token);
y = atof(token);
}
else if (startswith("z=", token))
{
token += 2;
CHECK_INPUT(atof(token) > 0.0, err << "z is not a positive float: " << token);
z = atof(token);
}
else if (startswith("h=", token))
{
token += 2;
CHECK_INPUT(atof(token) > 0.0,
err << "h is not a positive float: " << token);
h = atof(token);
}
else
{
badOption("grid", token);
}
token = strtok(NULL, " \t");
}
//--------------------------------------------------------------------
// There are only three ways to specify a grid.
//--------------------------------------------------------------------
if (h != 0.0)
{
if (nx > 0 || nz > 0 || ny > 0)
{
//----------------------------------------------------------------
// 1. nx, [ny], nz and h
//----------------------------------------------------------------
CHECK_INPUT(nx && nz, gridSetupErr);
CHECK_INPUT(x == 0.0 && y == 0.0 && z == 0.0, gridSetupErr);
}
else
{
//--------------------------------------------------------------
// 2. x, [y], z and h
//--------------------------------------------------------------
CHECK_INPUT(x > 0.0 && z > 0.0, gridSetupErr);
CHECK_INPUT(nx == 0 && ny == 0 && nz == 0, gridSetupErr);
}
}
else
{
//--------------------------------------------------------------------
// 3. x, [y], z and nx|ny|nz
//--------------------------------------------------------------------
CHECK_INPUT(x > 0.0 && z > 0.0, gridSetupErr);
CHECK_INPUT((nx > 0) + (ny > 0) + (nz > 0) == 1, gridSetupErr);
}
int nxprime, nyprime, nzprime;
float_sw4 xprime, yprime, zprime;
if (nx > 0 && h == 0.0)
{
// we set the number grid points in the x direction
// so we'll compute the grid spacing from that.
h = x / (nx-1);
if (m_myrank == 0)
cout << "Setting h to " << h << " from x/(nx-1) (x=" << x << ", nx=" << nx << ")" << endl;
nxprime = nx;
nzprime = computeEndGridPoint(z, h);
nyprime = computeEndGridPoint(y, h);
}
else if (ny > 0 && h == 0.0)
{
// set hte number of grid points from y direction and ny
h = y/(ny-1);
if (m_myrank == 0)
cout << "Setting h to " << h << " from y/(ny-1) (y=" << y << ", ny=" << ny << ")" << endl;
nyprime = ny;
nxprime = computeEndGridPoint(x, h);
nzprime = computeEndGridPoint(z, h);
}
else if (nz > 0 && h == 0.0)
{
// set the number of grid points from z direction and nz
h = z/(nz-1);
if (m_myrank == 0)
cout << "Setting h to " << h << " from z/(nz-1) (z=" << z << ", nz=" << nz << ")" << endl;
nzprime = nz;
nxprime = computeEndGridPoint(x, h);
nyprime = computeEndGridPoint(y, h);
}
else
{
//----------------------------------------------------
// h was set by the user, so compute the appropriate
// nx, ny, and nz or x, y, z.
//----------------------------------------------------
if (nx == 0 && x != 0.0)
nxprime = computeEndGridPoint(x, h);
else if (nx != 0)
nxprime = nx;
else
CHECK_INPUT(0, gridSetupErr);
if (nz == 0 && z != 0.0)
nzprime = computeEndGridPoint(z, h);
else if (nz != 0)
nzprime = nz;
else
CHECK_INPUT(0, gridSetupErr);
if (ny == 0 && y != 0.0)
nyprime = computeEndGridPoint(y, h);
else if (ny != 0)
nyprime = ny;
else
CHECK_INPUT(0, gridSetupErr);
}
if (m_myrank == 0 && mVerbose >=3)
printf("Setting up the grid for a non-periodic problem\n");
if (nxprime != nx && m_myrank == 0)
cout << "Setting nx to " << nxprime << " to be consistent with h=" << h << endl;
if (nyprime != ny && m_myrank == 0)
cout << "Setting ny to " << nyprime << " to be consistent with h=" << h << endl;
if (nzprime != nz && m_myrank == 0)
cout << "Setting nz to " << nzprime << " to be consistent with h=" << h << endl;
// -------------------------------------------------------------
// Now we adjust the geometry bounds based on the actual
// number of grid points used in each dimension.
// -------------------------------------------------------------
xprime = (nxprime-1)*h;
zprime = (nzprime-1)*h;
yprime = (nyprime-1)*h;
float_sw4 eps = 1.e-9*sqrt(xprime*xprime+yprime*yprime+zprime*zprime);
if (fabs(xprime-x) > eps && m_myrank == 0)
cout << "Changing x from " << x << " to " << xprime << " to be consistent with h=" << h << endl;
if (fabs(zprime-z) > eps && m_myrank == 0)
cout << "Changing z from " << z << " to " << zprime << " to be consistent with h=" << h << endl;
if (fabs(yprime-y) > eps && m_myrank == 0)
cout << "Changing y from " << y << " to " << yprime << " to be consistent with h=" << h << endl;
m_nx_base = nxprime;
m_ny_base = nyprime;
m_nz_base = nzprime;
m_h_base = h;
m_global_xmax = xprime;
m_global_ymax = yprime;
m_global_zmax = zprime;
m_global_zmin = 0;
}
//-----------------------------------------------------------------------
void EW::processTime(char* buffer)
{
float_sw4 t=0.0;
int steps = -1;
char* token = strtok(buffer, " \t");
token = strtok(NULL, " \t");
string err = "ERROR in processTime: ";
while (token != NULL)
{
// while there are still tokens in the string
if (startswith("#", token) || startswith(" ", buffer))
// Ignore commented lines and lines with just a space.
break;
if (startswith("t=", token))
{
token += 2; // skip t=
CHECK_INPUT(atof(token) >= 0.0, err << "t is not a positive float: " << token);
t = atof(token);
}
else if (startswith("steps=", token))
{
token += 6; // skip steps=
CHECK_INPUT(atoi(token) >= 0, err << "steps is not a non-negative integer: " << token);
steps = atoi(token);
}
else
{
badOption("time", token);
}
token = strtok(NULL, " \t");
}
CHECK_INPUT(!( (t > 0.0) && (steps >= 0) ),
"Time Error: Cannot set both t and steps for time");
if (t > 0.0)
{
mTmax = t;
mTstart = 0;
mTimeIsSet = true;
}
else if (steps >= 0)
{
mTstart = 0;
mNumberOfTimeSteps = steps;
mTimeIsSet = false;
}
// Set UTC as current date
time_t tsec;
time( &tsec );
struct tm *utctime = gmtime( &tsec );
m_utc0[0] = utctime->tm_year+1900;
m_utc0[1] = utctime->tm_mon+1;
m_utc0[2] = utctime->tm_mday;
m_utc0[3] = utctime->tm_hour;
m_utc0[4] = utctime->tm_min;
m_utc0[5] = utctime->tm_sec;
m_utc0[6] = 0; //milliseconds not given by 'time', not needed here.
}
//-----------------------------------------------------------------------
void EW::processTopography(char * buffer )
{
//
// Parse the 'topography' command: sets the curvilinear-grid mapping
// parameters (zmax, interpolation order, zetabreak), the topography
// input style and, for the Gaussian-hill style, its shape parameters.
//
// Note, m_topoFileName, m_topoExtFileName, m_maxIter, m_EFileResolution, m_QueryTyp could
// have been declared local variables in EW::parseInputFile, and transfered as
// procedure parameters to smoothTopography and getEfileInfo
//
char* token = strtok(buffer, " \t");
CHECK_INPUT(strcmp("topography", token) == 0,
"ERROR: not a topography line...: " << token);
string topoFile="surf.tp", style, fileName;
// NOTE(review): needFileName is never set true in this function, so the
// gotFileName check after the loop is currently inert — confirm intended.
bool needFileName=false, gotFileName=false;
// Defaults for the grid mapping; may be overridden by tokens below.
m_zetaBreak=0.95;
m_grid_interpolation_order = 4;
m_use_analytical_metric = false;
token = strtok(NULL, " \t");
while (token != NULL)
{
// while there are still tokens in the string
if (startswith("#", token) || startswith(" ", buffer))
// Ignore commented lines and lines with just a space.
break;
if (startswith("zmax=", token))
{
token += 5; // skip zmax=
m_topo_zmax = atof(token);
}
else if (startswith("order=", token))
{
token += 6; // skip order=
m_grid_interpolation_order = atoi(token);
// Only interpolation orders 2..7 are supported; abort otherwise.
if (m_grid_interpolation_order < 2 || m_grid_interpolation_order > 7)
{
if (m_myrank == 0)
cout << "order needs to be 2,3,4,5,6,or 7 not: " << m_grid_interpolation_order << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
else if( startswith("zetabreak=", token) ) // developer option: not documented in user's guide
{
token += 10; // skip zetabreak=
m_zetaBreak = atof(token);
CHECK_INPUT( m_zetaBreak > 0 && m_zetaBreak <= 1, "Error: zetabreak must be in [0,1], not " << m_zetaBreak);
}
else if( startswith("input=", token ) )
{
token += 6; // skip input=
style = token;
// Only the 'gaussian' input style is accepted in this build.
if( strcmp("gaussian", token) == 0)
{
m_topoInputStyle=GaussianHill;
m_topography_exists=true;
}
else
{
badOption("topography> input", token);
}
}
else if( startswith("file=", token ) )
{
token += 5; // skip file=
m_topoFileName = token;
gotFileName=true;
}
else if( startswith("gaussianAmp=", token ) )
{
token += 12; // skip gaussianAmp=
m_GaussianAmp = atof(token);
}
else if( startswith("gaussianXc=", token ) )
{
token += 11; // skip gaussianXc=
m_GaussianXc = atof(token);
}
else if( startswith("gaussianYc=", token ) )
{
token += 11; // skip gaussianYc=
m_GaussianYc = atof(token);
}
else if( startswith("gaussianLx=", token ) )
{
token += 11; // skip gaussianLx=
m_GaussianLx = atof(token);
}
else if( startswith("gaussianLy=", token ) )
{
token += 11; // skip gaussianLy=
m_GaussianLy = atof(token);
}
else if( startswith("analyticalMetric=", token ) )
{
token += 17; // skip analyticalMetric=
// Accept "1", "true" or "yes" as true; anything else is false.
m_use_analytical_metric = strcmp(token,"1")==0 ||
strcmp(token,"true")==0 || strcmp(token,"yes")==0;
}
else
{
badOption("topography", token);
}
token = strtok(NULL, " \t");
}
if (needFileName)
CHECK_INPUT(gotFileName,
"ERROR: no topography file name specified...: " << token);
CHECK_INPUT(m_topoInputStyle == GaussianHill,
"Topography style " << m_topoInputStyle << " not yet implemented " << endl);
// NOTE(review): if CHECK_INPUT aborts on failure, the branch below is
// unreachable (the style is already known to be GaussianHill here) —
// confirm against the CHECK_INPUT macro definition.
if( m_topoInputStyle != GaussianHill && m_use_analytical_metric )
{
m_use_analytical_metric = false;
if( m_myrank == 0 )
cout << "Analytical metric only defined for Gaussian Hill topography" <<
" topography analyticalMetric option will be ignored " << endl;
}
}
//-----------------------------------------------------------------------
void EW::processFileIO(char* buffer)
{
char* token = strtok(buffer, " \t");
CHECK_INPUT(strcmp("fileio", token) == 0, "ERROR: not a fileio line...: " << token);
token = strtok(NULL, " \t");
string err = "FileIO Error: ";
while (token != NULL)
{
if (startswith("#", token) || startswith(" ", buffer))
break;
if(startswith("path=", token)) {
token += 5; // skip path=
mPath = token;
mPath += '/';
// path = token;
}
else if (startswith("verbose=", token))
{
token += 8; // skip verbose=
CHECK_INPUT(atoi(token) >= 0, err << "verbose must be non-negative, not: " << token);
mVerbose = atoi(token);
}
else if (startswith("printcycle=", token))
{
token += 11; // skip printcycle=
CHECK_INPUT(atoi(token) > -1,
err << "printcycle must be zero or greater, not: " << token);
mPrintInterval = atoi(token);
}
else if (startswith("pfs=", token))
{
token += 4; // skip pfs=
m_pfs = (atoi(token) == 1);
}
else if (startswith("nwriters=", token))
{
token += 9; // skip nwriters=
CHECK_INPUT(atoi(token) > 0,
err << "nwriters must be positive, not: " << token);
m_nwriters = atoi(token);
}
else
{
badOption("fileio", token);
}
token = strtok(NULL, " \t");
}
}
//-----------------------------------------------------------------------
void EW::processCheckPoint(char* buffer)
{
char* token = strtok(buffer, " \t");
CHECK_INPUT(strcmp("checkpoint", token) == 0, "ERROR: not a checkpoint line...: " << token);
token = strtok(NULL, " \t");
string err = "CheckPoint Error: ";
int cycle=-1, cycleInterval=0;
float_sw4 time=0.0, timeInterval=0.0;
bool timingSet=false;
string filePrefix = "restart";
size_t bufsize=10000000;
while (token != NULL)
{
if (startswith("#", token) || startswith(" ", buffer))
break;
if (startswith("time=", token) )
{
token += 5; // skip time=
CHECK_INPUT( atof(token) >= 0., err << "time must be a non-negative number, not: " << token);
time = atof(token);
timingSet = true;
}
else if (startswith("timeInterval=", token) )
{
token += 13; // skip timeInterval=
CHECK_INPUT( atof(token) >= 0., err<< "timeInterval must be a non-negative number, not: " << token);
timeInterval = atof(token);
timingSet = true;
}
else if (startswith("cycle=", token) )
{
token += 6; // skip cycle=
CHECK_INPUT( atoi(token) >= 0., err << "cycle must be a non-negative integer, not: " << token);
cycle = atoi(token);
timingSet = true;
}
else if (startswith("cycleInterval=", token) )
{
token += 14; // skip cycleInterval=
CHECK_INPUT( atoi(token) >= 0., err << "cycleInterval must be a non-negative integer, not: " << token);
cycleInterval = atoi(token);
timingSet = true;
}
else if (startswith("file=", token))
{
token += 5; // skip file=
filePrefix = token;
}
else if (startswith("bufsize=", token))
{
token += 8; // skip bufsize=
bufsize = atoi(token);
}
else
{
badOption("checkpoint", token);
}
token = strtok(NULL, " \t");
}
CHECK_INPUT( timingSet, "Processing checkpoint command: " <<
"at least one timing mechanism must be set: cycle, time, cycleInterval or timeInterval" << endl );
CheckPoint* chkpt = new CheckPoint( this, time, timeInterval, cycle, cycleInterval, filePrefix, bufsize );
m_check_points.push_back(chkpt);
}
//-----------------------------------------------------------------------
void EW::processRestart(char* buffer)
{
char* token = strtok(buffer, " \t");
CHECK_INPUT(strcmp("restart", token) == 0, "ERROR: not a restart line...: " << token);
token = strtok(NULL, " \t");
string fileName;
bool filenamegiven = false;
size_t bufsize=10000000;
while (token != NULL)
{
if (startswith("#", token) || startswith(" ", buffer))
break;
if (startswith("file=", token) )
{
token += 5; // skip file=
fileName = token;
filenamegiven = true;
}
else if (startswith("bufsize=", token))
{
token += 8; // skip bufsize=
bufsize = atoi(token);
}
else
{
badOption("restart", token);
}
token = strtok(NULL, " \t");
}
CHECK_INPUT( filenamegiven, "Processing restart command: " <<
"restart file name must be given" << endl );
CHECK_INPUT( m_restart_check_point == CheckPoint::nil, "Processing restart command: "<<
" There can only be one restart file");
m_restart_check_point = new CheckPoint( this, fileName, bufsize );
}
//-----------------------------------------------------------------------
void EW::processTestPointSource(char* buffer)
{
char* token = strtok(buffer, " \t");
token = strtok(NULL, " \t");
float_sw4 cs = 1.0, rho=1.0, cp=sqrt(3.0);
bool free_surface=false;
while (token != NULL)
{
if (startswith("#", token) || startswith(" ", buffer))
break;
if (startswith("cp=", token))
{
token += 3;
cp = atof(token);
}
else if (startswith("cs=", token))
{
token += 3;
cs = atof(token);
}
else if (startswith("rho=", token))
{
token += 4;
rho = atof(token);
}
else if (startswith("diractest=", token))
{
token += 10;
if( strcmp(token,"1")==0 || strcmp(token,"true")==0 )
m_moment_test = true;
}
else if (startswith("halfspace=", token))
{
token += 10;
free_surface = ( strcmp(token,"1")==0 || strcmp(token,"true")==0 );
}
else
{
badOption("testpointsource", token);
}
token = strtok(NULL, " \t");
}
m_point_source_test = true;
float_sw4 mu = rho*cs*cs;
float_sw4 la = rho*cp*cp-2*mu;
for( int g=0 ; g < mNumberOfGrids ; g++ )
{
mRho[g].set_value(rho);
mMu[g].set_value(mu);
mLambda[g].set_value(la);
}
for( int side=0 ; side < 6 ; side++ )
mbcGlobalType[side]=bSuperGrid;
if( free_surface )
mbcGlobalType[4]=bStressFree;
}
//----------------------------------------------------------------------------
void EW::processSource( char* buffer )
{
// Parse a 'source' command and append a Source object to
// m_globalUniqueSources. A source is either a moment-tensor source
// (Mxx..Mzz, each scaled by m0) or a point force (Fx,Fy,Fz, each scaled
// by f0); mixing the two forms is an input error. The location is given
// in cartesian coordinates (x, y and either z or depth); the time
// function defaults to RickerInt.
Source* sourcePtr;
float_sw4 m0 = 1.0;
float_sw4 t0=0.0, f0=1.0, freq=1.0;
// Should be center of the grid
float_sw4 x = 0.0, y = 0.0, z = 0.0;
// int i = 0, j = 0, k = 0;
float_sw4 mxx=0.0, mxy=0.0, mxz=0.0, myy=0.0, myz=0.0, mzz=0.0;
// float_sw4 strike=0.0, dip=0.0, rake=0.0;
float_sw4 fx=0.0, fy=0.0, fz=0.0;
// -1 = undecided, 1 = moment tensor, 0 = point force.
int isMomentType = -1;
// float_sw4 lat = 0.0, lon = 0.0, depth = 0.0;
float_sw4 depth= 0.0;
bool topodepth = false, depthSet=false, zSet=false;
bool cartCoordSet = false;
// Extra parameter arrays passed through to the Source constructor
// (unused by this command; always NULL/0 here).
float_sw4* par=NULL;
int* ipar=NULL;
int npar=0, nipar=0;
int ncyc = 5;
timeDep tDep = iRickerInt;
char formstring[100];
// char dfile[1000];
strcpy(formstring, "Ricker");
char* token = strtok(buffer, " \t");
token = strtok(NULL, " \t");
string err = "ERROR in ProcessSource: ";
// string cartAndGeoErr = "source command: Cannot set both a geographical (lat,lon) and cartesian coordinate (x,y)";
string pointAndMomentErr = "source command: Cannot set both a point source and moment tensor formulation";
while (token != NULL)
{
// while there are tokens in the string still
if (startswith("#", token) || startswith(" ", buffer))
// Ignore commented lines and lines with just a space.
break;
if (startswith("m0=", token) )
{
token += 3; // skip m0=
CHECK_INPUT(atof(token) >= 0.0,
err << "source command: scalar moment term must be positive, not: " << token);
m0 = atof(token);
}
else if (startswith("x=", token))
{
token += 2; // skip x=
x = atof(token);
cartCoordSet = true;
}
else if (startswith("y=", token))
{
token += 2; // skip y=
y = atof(token);
cartCoordSet = true;
}
else if (startswith("z=", token))
{
token += 2; // skip z=
// with topography, the z-coordinate can have both signs!
z = atof(token);
topodepth=false; // this is absolute depth
zSet = true;
}
else if (startswith("depth=", token)) // this is the same as topodepth: different from WPP
{
token += 6; // skip depth=
depth = atof(token);
topodepth = true;
CHECK_INPUT(depth >= 0.0,
err << "source command: Depth below topography must be greater than or equal to zero");
depthSet=true;
}
else if (startswith("Mxx=", token) || startswith("mxx=", token))
{
CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
token += 4; // skip Mxx=
mxx = atof(token);
isMomentType = 1;
}
else if (startswith("Mxy=", token) || startswith("mxy=", token))
{
CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
token += 4; // skip Mxy=
mxy = atof(token);
isMomentType = 1;
}
else if (startswith("Mxz=", token) || startswith("mxz=", token))
{
CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
token += 4; // skip Mxz=
mxz = atof(token);
isMomentType = 1;
}
else if (startswith("Myy=", token) || startswith("myy=", token))
{
CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
token += 4; // skip Myy=
myy = atof(token);
isMomentType = 1;
}
else if (startswith("Myz=", token) || startswith("myz=", token))
{
CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
token += 4; // skip Myz=
myz = atof(token);
isMomentType = 1;
}
else if (startswith("Mzz=", token) || startswith("mzz=", token))
{
CHECK_INPUT(isMomentType != 0, err << pointAndMomentErr);
token += 4; // skip Mzz=
mzz = atof(token);
isMomentType = 1;
}
else if (startswith("Fz=", token) || startswith("fz=", token))
{
CHECK_INPUT(isMomentType != 1, err << pointAndMomentErr);
token += 3; // skip Fz=
fz = atof(token);
isMomentType = 0;
}
else if (startswith("Fx=", token) || startswith("fx=", token))
{
CHECK_INPUT(isMomentType != 1, err << pointAndMomentErr);
token += 3; // skip Fx=
fx = atof(token);
isMomentType = 0;
}
else if (startswith("Fy=", token) || startswith("fy=", token))
{
CHECK_INPUT(isMomentType != 1, err << pointAndMomentErr);
token += 3; // skip Fy=
fy = atof(token);
isMomentType = 0;
}
else if (startswith("t0=", token))
{
token += 3; // skip t0=
t0 = atof(token);
}
else if (startswith("freq=", token))
{
token += 5; // skip freq=
freq = atof(token);
CHECK_INPUT(freq > 0,
err << "source command: Frequency must be > 0");
}
else if (startswith("f0=", token))
{
CHECK_INPUT(isMomentType != 1,
err << "source command: Cannot set force amplitude for moment tensor terms");
token += strlen("f0=");
f0 = atof(token);
}
else if (startswith("type=",token))
{
token += 5;
// NOTE(review): strncpy does not NUL-terminate formstring when token is
// 100 chars or longer — confirm input lines cannot reach that length.
strncpy(formstring, token,100);
if (!strcmp("Ricker",formstring))
tDep = iRicker;
else if (!strcmp("Gaussian",formstring))
tDep = iGaussian;
else if (!strcmp("Ramp",formstring))
tDep = iRamp;
else if (!strcmp("Triangle",formstring))
tDep = iTriangle;
else if (!strcmp("Sawtooth",formstring))
tDep = iSawtooth;
else if (!strcmp("SmoothWave",formstring))
tDep = iSmoothWave;
else if (!strcmp("Erf",formstring) || !strcmp("GaussianInt",formstring) )
tDep = iErf;
else if (!strcmp("VerySmoothBump",formstring))
tDep = iVerySmoothBump;
else if (!strcmp("RickerInt",formstring) )
tDep = iRickerInt;
else if (!strcmp("Brune",formstring) )
tDep = iBrune;
else if (!strcmp("BruneSmoothed",formstring) )
tDep = iBruneSmoothed;
else if (!strcmp("DBrune",formstring) )
tDep = iDBrune;
else if (!strcmp("GaussianWindow",formstring) )
tDep = iGaussianWindow;
else if (!strcmp("Liu",formstring) )
tDep = iLiu;
else if (!strcmp("Dirac",formstring) )
tDep = iDirac;
else if (!strcmp("C6SmoothBump",formstring) )
tDep = iC6SmoothBump;
else
// Unknown type name: warn (rank 0 only) and keep the default tDep.
if (m_myrank == 0)
cout << "unknown time function: " << formstring << endl << " using default RickerInt function." << endl;
}
else
{
badOption("source", token);
}
token = strtok(NULL, " \t");
}
CHECK_INPUT(depthSet || zSet,
err << "source command: depth, topodepth or z-coordinate must be specified");
if (depthSet)
{
z = depth;
}
if (cartCoordSet)
{
float_sw4 xmin = 0.;
float_sw4 ymin = 0.;
float_sw4 zmin;
// only check the z>zmin when we have topography. For a flat free surface, we will remove sources too
// close or above the surface in the call to mGlobalUniqueSources[i]->correct_Z_level()
if (m_topography_exists) // topography command must be read before the source command
zmin = m_global_zmin;
else
zmin = 0;
// Reject sources outside the computational domain; with topography the
// z-range is checked later (see comment above).
if ( (m_topography_exists && (x < xmin || x > m_global_xmax || y < ymin || y > m_global_ymax )) ||
(!m_topography_exists && (x < xmin || x > m_global_xmax || y < ymin || y > m_global_ymax ||
z < zmin || z > m_global_zmax)) )
{
stringstream sourceposerr;
sourceposerr << endl
<< "***************************************************" << endl
<< " FATAL ERROR: Source positioned outside grid! " << endl
<< endl
<< " Source Type: " << formstring << endl
<< " @ x=" << x
<< " y=" << y << " z=" << z << endl
<< endl;
if ( x < xmin )
sourceposerr << " x is " << xmin - x <<
" meters away from min x (" << xmin << ")" << endl;
else if ( x > m_global_xmax)
sourceposerr << " x is " << x - m_global_xmax <<
" meters away from max x (" << m_global_xmax << ")" << endl;
if ( y < ymin )
sourceposerr << " y is " << ymin - y <<
" meters away from min y (" << ymin << ")" << endl;
else if ( y > m_global_ymax)
sourceposerr << " y is " << y - m_global_ymax <<
" meters away from max y (" << m_global_ymax << ")" << endl;
if ( z < zmin )
sourceposerr << " z is " << zmin - z <<
" meters away from min z (" << zmin << ")" << endl;
else if ( z > m_global_zmax)
sourceposerr << " z is " << z - m_global_zmax <<
" meters away from max z (" << m_global_zmax << ")" << endl;
sourceposerr << "***************************************************" << endl;
if (m_myrank == 0)
cout << sourceposerr.str();
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// NOTE(review): when neither M?? nor F? was given, isMomentType stays -1,
// which is truthy, so the moment branch is taken with an all-zero tensor
// — confirm this default is intended.
if (isMomentType)
{
// Remove amplitude variable
mxx *= m0;
mxy *= m0;
mxz *= m0;
myy *= m0;
myz *= m0;
mzz *= m0;
// these have global location since they will be used by all processors
sourcePtr = new Source(this, freq, t0, x, y, z, mxx, mxy, mxz, myy, myz, mzz,
tDep, formstring, topodepth, ncyc, par, npar, ipar, nipar, false ); // false is correctStrengthForMu
if (sourcePtr->ignore())
{
delete sourcePtr;
}
else
{
m_globalUniqueSources.push_back(sourcePtr);
}
}
else // point forcing
{
// Remove amplitude variable
fx *= f0;
fy *= f0;
fz *= f0;
// global version (gets real coordinates)
sourcePtr = new Source(this, freq, t0, x, y, z, fx, fy, fz, tDep, formstring, topodepth, ncyc,
par, npar, ipar, nipar, false ); // false is correctStrengthForMu
//...and add it to the list of forcing terms
if (sourcePtr->ignore())
{
delete sourcePtr;
}
else
{
m_globalUniqueSources.push_back(sourcePtr);
}
}
}
//-----------------------------------------------------------------------
void EW::processSuperGrid(char *buffer)
{
   // Parse the 'supergrid' command: damping-layer thickness in grid
   // points ('gp=') and damping coefficient ('dc='). When no coefficient
   // is given, a default is chosen from the damping order (4 or 6).
   char* tok = strtok(buffer, " \t");
   tok = strtok(NULL, " \t"); // skip the command word itself
   int sg_thickness;
   float_sw4 sg_coeff;
   bool thicknessSet = false, dampingCoeffSet = false;
   while (tok != NULL)
   {
      // Stop at comments and blank lines.
      if (startswith("#", tok) || startswith(" ", buffer))
         break;
      if (startswith("gp=", tok)) // thickness in number of grid points (different from WPP)
      {
         sg_thickness = atoi(tok + 3);
         CHECK_INPUT(sg_thickness>0, "The number of grid points in the supergrid damping layer must be positive, not: "<< sg_thickness);
         thicknessSet = true;
      }
      else if (startswith("dc=", tok))
      {
         sg_coeff = atof(tok + 3);
         CHECK_INPUT(sg_coeff>=0., "The supergrid damping coefficient must be non-negative, not: "<<sg_coeff);
         dampingCoeffSet = true;
      }
      else
      {
         badOption("supergrid", tok);
      }
      tok = strtok(NULL, " \t");
   }
   if (thicknessSet)
      m_sg_gp_thickness = sg_thickness;
   if (dampingCoeffSet)
      m_supergrid_damping_coefficient = sg_coeff;
   else if (m_sg_damping_order == 4)
      m_supergrid_damping_coefficient = 0.02;
   else if (m_sg_damping_order == 6)
      m_supergrid_damping_coefficient = 0.005;
}
//-----------------------------------------------------------------------
void EW::processDeveloper(char* buffer)
{
char* token = strtok(buffer, " \t");
CHECK_INPUT(strcmp("developer", token) == 0, "ERROR: not a developer line...: " << token);
token = strtok(NULL, " \t");
while (token != NULL)
{
// while there are tokens in the string still
if (startswith("#", token) || startswith(" ", buffer))
// Ignore commented lines and lines with just a space.
break;
if( startswith("cfl=",token) )
{
token += 4;
float_sw4 cfl = atof(token);
CHECK_INPUT( cfl > 0, "Error negative CFL number");
// set_cflnumber( cfl );
mCFL = cfl;
}
else if( startswith("checkfornan=",token) )
{
token += 12;
m_checkfornan = strcmp(token,"1")==0 || strcmp(token,"on")==0 || strcmp(token,"yes")==0;
}
else if( startswith("reporttiming=",token) )
{
token += 13;
m_output_detailed_timing = strcmp(token,"1")==0 || strcmp(token,"on")==0 || strcmp(token,"yes")==0;
}
else if( startswith("trace=",token) )
{
token += 6;
m_save_trace = strcmp(token,"yes")==0
|| strcmp(token,"1")==0 || strcmp(token,"on")==0;
}
else if( startswith("thblocki=",token) )
{
token += 9;
m_gpu_blocksize[0] = atoi(token);
}
else if( startswith("thblockj=",token) )
{
token += 9;
m_gpu_blocksize[1] = atoi(token);
}
else if( startswith("thblockk=",token) )
{
token += 9;
m_gpu_blocksize[2] = atoi(token);
}
else if( startswith("corder=",token) )
{
token += 7;
m_corder = strcmp(token,"yes")==0
|| strcmp(token,"1")==0 || strcmp(token,"on")==0;
Sarray::m_corder = m_corder;
}
else
{
badOption("developer", token);
}
token = strtok(NULL, " \t");
}
}
//------------------------------------------------------------------------
void EW::processMaterialBlock( char* buffer )
{
// Parse a 'block' command and append a MaterialBlock (rho, vs, vp with
// optional gradients and Q factors, bounded by x1..x2, y1..y2, z1..z2)
// to m_mtrlblocks. Unset bounds default to values well outside the
// domain so the block covers the whole extent in that direction.
float_sw4 vpgrad=0.0, vsgrad=0.0, rhograd=0.0;
bool x1set=false, x2set=false, y1set=false, y2set=false,
z1set=false, z2set=false;
float_sw4 x1=0.0, x2=0.0, y1=0.0, y2=0.0, z1=0.0, z2=0.0;
// int i1=-1, i2=-1, j1=-1, j2=-1, k1=-1, k2=-1;
string name = "Block";
char* token = strtok(buffer, " \t");
CHECK_INPUT(strcmp("block", token) == 0,
"ERROR: material block can be set by a block line, not: " << token);
string err = token;
err += " Error: ";
token = strtok(NULL, " \t");
// NOTE(review): freq is never parsed by this command; it is always passed
// to MaterialBlock as 1 — confirm intended.
float_sw4 vp=-1, vs=-1, rho=-1, qp=-1, qs=-1, freq=1;
bool absDepth=false;
while (token != NULL)
{
// while there are tokens in the string still
if (startswith("#", token) || startswith(" ", buffer))
// Ignore commented lines and lines with just a space.
break;
// the xygrad keywords must occur before the corresponding xy keywords
if (startswith("rhograd=", token))
{
token += 8; // skip rhograd=
rhograd = atof(token);
}
else if (startswith("vpgrad=", token))
{
token += 7; // skip vpgrad=
vpgrad = atof(token);
}
else if (startswith("vsgrad=", token))
{
token += 7; // skip vsgrad=
vsgrad = atof(token);
}
else if (startswith("vp=", token) )
{
token += 3; // skip vp=
vp = atof(token);
}
else if (startswith("vs=", token) )
{
token += 3; // skip vs=
vs = atof(token);
}
else if (startswith("rho=", token))
{
token += 4; // skip rho=
rho = atof(token);
}
else if (startswith("r=", token)) // superseded by rho=, but keep for backward compatibility
{
token += 2; // skip r=
rho = atof(token);
}
else if (startswith("Qs=", token) || startswith("qs=",token) )
{
token += 3; // skip Qs=/qs= (both are 3 chars)
qs = atof(token);
}
else if (startswith("Qp=", token) || startswith("qp=",token) )
{
token += 3; // skip Qp=/qp= (both are 3 chars)
qp = atof(token);
}
else if (startswith("absdepth=", token) )
{
token += 9; // skip absdepth=
absDepth = (bool) atoi(token);
}
else if (startswith("x1=", token))
{
token += 3; // skip x1=
x1 = atof(token);
x1set = true;
}
else if (startswith("x2=", token))
{
token += 3; // skip x2=
x2 = atof(token);
x2set = true;
}
else if (startswith("y1=", token))
{
token += 3; // skip y1=
y1 = atof(token);
y1set = true;
}
else if (startswith("y2=", token))
{
token += 3; // skip y2=
y2 = atof(token);
y2set = true;
}
else if (startswith("z1=", token))
{
token += 3; // skip z1=
z1 = atof(token);
z1set = true;
}
else if (startswith("z2=", token))
{
token += 3; // skip z2=
z2 = atof(token);
z2set = true;
}
else
{
badOption("block", token);
}
token = strtok(NULL, " \t");
}
// End parsing...
// Set up a block on the EW object. Each bound is validated when given,
// otherwise defaulted to extend beyond the domain in that direction.
if (x1set)
{
CHECK_INPUT(x1 <= m_global_xmax,
err << "x1 is greater than the maximum x, "
<< x1 << " > " << m_global_xmax);
}
else
x1 = -m_global_xmax; //x1 = 0.;
if (x2set)
{
CHECK_INPUT(x2 >= 0.,
err << "x2 is less than the minimum x, "
<< x2 << " < " << 0.);
}
else
x2 = 2.*m_global_xmax;//x2 = m_global_xmax;
CHECK_INPUT( x2 >= x1, " (x1..x2), upper bound is smaller than lower bound");
//--------------------------------------------------------
// Set j bounds, goes with Y in WPP
//--------------------------------------------------------
if (y1set)
{
CHECK_INPUT(y1 <= m_global_ymax,
err << "y1 is greater than the maximum y, " << y1 << " > " << m_global_ymax);
}
else
y1 = -m_global_ymax;//y1 = 0.;
if (y2set)
{
CHECK_INPUT(y2 >= 0.,
err << "y2 is less than the minimum y, " << y2 << " < " << 0.);
}
else
y2 = 2.*m_global_ymax;//y2 = m_global_ymax;
CHECK_INPUT( y2 >= y1, " (y1..y2), upper bound is smaller than lower bound");
if (z1set)
{
CHECK_INPUT(z1 <= m_global_zmax,
err << "z1 is greater than the maximum z, " << z1 << " > " << m_global_zmax);
}
else
z1 = m_global_zmin - (m_global_zmax-m_global_zmin);
if (z2set)
{
// With topography the free surface can lie above z=0, so negative z2 is allowed.
CHECK_INPUT(topographyExists() || z2 >= 0.,
err << "z2 is less than the minimum z, " << z2 << " < " << 0.);
}
else
z2 = m_global_zmax + (m_global_zmax-m_global_zmin);
CHECK_INPUT( z2 >= z1, " (z1..z2), upper bound is smaller than lower bound");
if( getVerbosity() >=2 && m_myrank == 0 )
cout << name << " has bounds " << x1 << " " << x2 << " " << y1 << " "
<< y2 << " " << z1 << " " << z2 << endl;
CHECK_INPUT( vs > 0 && vp > 0 && rho > 0 , "Error in block " << name << " vp vs rho are "
<< vp << " " << vs << " " << rho );
// Ownership of the new MaterialBlock passes to m_mtrlblocks.
MaterialBlock* bl = new MaterialBlock( this ,rho, vs, vp, x1, x2, y1, y2, z1, z2, qs, qp, freq );
bl->set_gradients( rhograd, vsgrad, vpgrad );
bl->set_absoluteDepth( absDepth );
m_mtrlblocks.push_back(bl);
}
//-----------------------------------------------------------------------
void EW::processReceiver(char* buffer )
{
   // Parse a 'rec' (or legacy 'sac') command line and create a recording
   // station.  'buffer' holds the entire input line and is tokenized
   // destructively with strtok.  A station may be positioned either in
   // Cartesian (x,y) or geographic (lat,lon) coordinates, but not both.
   // Stations that fall outside the computational domain are reported on
   // rank 0 (if verbose) and dropped; otherwise a TimeSeries object is
   // appended to m_GlobalTimeSeries.
   float_sw4 x=0.0, y=0.0, z=0.0;
   float_sw4 lat = 0.0, lon = 0.0, depth = 0.0;
   bool cartCoordSet = false, geoCoordSet = false; // which coordinate style was supplied
   string fileName = "station";
   string staName = "station";
   bool staNameGiven=false;
   int writeEvery = 1000; // flush interval (in time steps) for the station file
   bool topodepth = false; // true: z is depth below topography, false: absolute z
   bool usgsformat = 0, sacformat=1; // default is to write sac files
   TimeSeries::receiverMode mode=TimeSeries::Displacement;
   char* token = strtok(buffer, " \t");
   bool nsew=false; // true: rotate components to North/East/UpDown orientation
   // cerr << "******************** INSIDE process receiver *********************" << endl;
   CHECK_INPUT(strcmp("rec", token) == 0 || strcmp("sac", token) == 0, "ERROR: not a rec line...: " << token);
   token = strtok(NULL, " \t");
   string err = "RECEIVER Error: ";
   // Consume key=value tokens until the line is exhausted.
   while (token != NULL)
   {
      // while there are tokens in the string still
      // cout << m_myRank << " token " << token <<"x"<<endl;
      if (startswith("#", token) || startswith(" ", buffer))
         // Ignore commented lines and lines with just a space.
         break;
      if (startswith("x=", token))
      {
         CHECK_INPUT(!geoCoordSet,
                 err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
         token += 2; // skip x=
         cartCoordSet = true;
         x = atof(token);
         CHECK_INPUT(x >= 0.0,
                 "receiver command: x must be greater than or equal to 0, not " << x);
         CHECK_INPUT(x <= m_global_xmax,
                 "receiver command: x must be less than or equal to xmax, not " << x);
      }
      else if (startswith("y=", token))
      {
         CHECK_INPUT(!geoCoordSet,
                 err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
         token += 2; // skip y=
         cartCoordSet = true;
         y = atof(token);
         CHECK_INPUT(y >= 0.0,
                 "receiver command: y must be greater than or equal to 0, not " << y);
         CHECK_INPUT(y <= m_global_ymax,
                 "receiver command: y must be less than or equal to ymax, not " << y);
      }
      else if (startswith("lat=", token))
      {
         CHECK_INPUT(!cartCoordSet,
                 err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
         token += 4; // skip lat=
         lat = atof(token);
         CHECK_INPUT(lat >= -90.0,
                 "receiver command: lat must be greater than or equal to -90 degrees, not "
                 << lat);
         CHECK_INPUT(lat <= 90.0,
                 "receiver command: lat must be less than or equal to 90 degrees, not "
                 << lat);
         geoCoordSet = true;
      }
      else if (startswith("lon=", token))
      {
         CHECK_INPUT(!cartCoordSet,
                 err << "receiver command: Cannot set both a geographical (lat, lon) and a cartesian (x,y) coordinate");
         token += 4; // skip lon=
         lon = atof(token);
         CHECK_INPUT(lon >= -180.0,
                 "receiver command: lon must be greater or equal to -180 degrees, not "
                 << lon);
         CHECK_INPUT(lon <= 180.0,
                 "receiver command: lon must be less than or equal to 180 degrees, not "
                 << lon);
         geoCoordSet = true;
      }
      else if (startswith("z=", token))
      {
         token += 2; // skip z=
         depth = z = atof(token);
         topodepth = false; // absolute depth (below mean sea level)
         CHECK_INPUT(z <= m_global_zmax,
                 "receiver command: z must be less than or equal to zmax, not " << z);
      }
      else if (startswith("depth=", token))
      {
         token += 6; // skip depth=
         z = depth = atof(token);
         topodepth = true; // by depth we here mean depth below topography
         CHECK_INPUT(depth >= 0.0,
                 err << "receiver command: depth must be greater than or equal to zero");
         CHECK_INPUT(depth <= m_global_zmax,
                 "receiver command: depth must be less than or equal to zmax, not " << depth);
      }
      else if (startswith("topodepth=", token))
      {
         // Same semantics as depth=; kept as an explicit synonym.
         token += 10; // skip topodepth=
         z = depth = atof(token);
         topodepth = true; // by depth we here mean depth below topography
         CHECK_INPUT(depth >= 0.0,
                 err << "receiver command: depth must be greater than or equal to zero");
         CHECK_INPUT(depth <= m_global_zmax,
                 "receiver command: depth must be less than or equal to zmax, not " << depth);
      }
      else if(startswith("file=", token))
      {
         token += 5; // skip file=
         fileName = token;
      }
      else if (startswith("sta=", token))
      {
         token += strlen("sta=");
         staName = token;
         staNameGiven=true;
      }
      else if( startswith("nsew=", token) )
      {
         token += strlen("nsew=");
         nsew = atoi(token) == 1;
      }
      else if (startswith("writeEvery=", token))
      {
         token += strlen("writeEvery=");
         writeEvery = atoi(token);
         CHECK_INPUT(writeEvery >= 0,
                 err << "sac command: writeEvery must be set to a non-negative integer, not: " << token);
      }
      else if( startswith("usgsformat=", token) )
      {
         token += strlen("usgsformat=");
         usgsformat = atoi(token);
      }
      else if( startswith("sacformat=", token) )
      {
         token += strlen("sacformat=");
         sacformat = atoi(token);
      }
      else if( startswith("variables=", token) )
      {
         // Select which field is recorded at the station.
         token += strlen("variables=");
         if( strcmp("displacement",token)==0 )
         {
            mode = TimeSeries::Displacement;
         }
         else if( strcmp("velocity",token)==0 )
         {
            mode = TimeSeries::Velocity;
         }
         else if( strcmp("div",token)==0 )
         {
            mode = TimeSeries::Div;
         }
         else if( strcmp("curl",token)==0 )
         {
            mode = TimeSeries::Curl;
         }
         else if( strcmp("strains",token)==0 )
         {
            mode = TimeSeries::Strains;
         }
         else if( strcmp("displacementgradient",token)==0 )
         {
            mode = TimeSeries::DisplacementGradient;
         }
         else
         {
            // Unknown variable name: warn once on rank 0 and fall back.
            if (m_myrank == 0 )
               cout << "receiver command: variables=" << token << " not understood" << endl
                    << "using default mode (displacement)" << endl << endl;
            mode = TimeSeries::Displacement;
         }
      }
      else
      {
         badOption("receiver", token);
      }
      token = strtok(NULL, " \t");
   }
   if (geoCoordSet)
   {
      // Translate (lon,lat) into the Cartesian (x,y) used internally.
      computeCartesianCoord(x, y, lon, lat);
      // check if (x,y) is within the computational domain
   }
   if (!staNameGiven)
      staName = fileName;
   bool inCurvilinear=false;
   // we are in or above the curvilinear grid
   if ( topographyExists() && z < m_zmin[mNumberOfCartesianGrids-1])
   {
      inCurvilinear = true;
   }
   // check if (x,y,z) is not in the global bounding box
   if ( !( (inCurvilinear || z >= 0) && x>=0 && x<=m_global_xmax && y>=0 && y<=m_global_ymax))
   {
      // The location of this station was outside the domain, so don't include it in the global list
      if (m_myrank == 0 && getVerbosity() > 0)
      {
         stringstream receivererr;
         receivererr << endl
                     << "***************************************************" << endl
                     << " WARNING: RECEIVER positioned outside grid!" << endl;
         receivererr << " No RECEIVER file will be generated for file = " << fileName << endl;
         if (geoCoordSet)
         {
            receivererr << " @ lon=" << lon << " lat=" << lat << " depth=" << depth << endl << endl;
         }
         else
         {
            receivererr << " @ x=" << x << " y=" << y << " z=" << z << endl << endl;
         }
         receivererr << "***************************************************" << endl;
         cerr << receivererr.str();
         cerr.flush();
      }
   }
   else
   {
      TimeSeries *ts_ptr = new TimeSeries(this, fileName, staName, mode, sacformat, usgsformat, x, y, depth,
                                          topodepth, writeEvery, !nsew);
      // include the receiver in the global list
      m_GlobalTimeSeries.push_back(ts_ptr);
   }
}
//-----------------------------------------------------------------------
void EW::defineDimensionsGXY( )
{
   //
   // Defines the number of grids and dimensions in the x- and y-directions,
   // It also defines the parallel decomposition, which is only made in the x-y directions.
   //
   // The z-direction requires topography to be known before computing dimensions.
   // x- and y-dimensions must be defined before the topography is read.
   // Hence, we have to 1. Define x and y dimensions,
   //                   2. Read the topography
   //                   3. Define z dimensions.
   if (mVerbose && m_myrank == 0 )
      printf("defineDimensionsGXY: #ghost points=%i, #parallel padding points=%i\n", m_ghost_points, m_ppadding);
   // Grids are enumerated from bottom to the top, i.e, g=0 is at the bottom, and g=mNumberOfGrids-1 is at the top.
   // Note, this is oposite to the z-coordinate which is largest at the bottom and smallest at the top.
   if( m_nz_base > 1 && !m_topography_exists )
   {
      // Flat: a single Cartesian grid.
      mNumberOfCartesianGrids = mNumberOfGrids = 1;
      m_is_curvilinear.push_back(false);
   }
   else if( m_nz_base > 1 && m_topography_exists )
   {
      // Curvilinear: one Cartesian grid below, one curvilinear grid on top.
      mNumberOfGrids = 2;
      mNumberOfCartesianGrids = 1;
      m_is_curvilinear.push_back(false);
      m_is_curvilinear.push_back(true);
   }
   else
      if( m_myrank == 0 )
         cout << "ERROR in defineDimensionsXY, domain could not be defined" << endl;
   // Compute parallel decomposition of the finest grid including ghost points.
   int nx_finest_w_ghost = m_nx_base+2*m_ghost_points;
   int ny_finest_w_ghost = m_ny_base+2*m_ghost_points;
   proc_decompose_2d( nx_finest_w_ghost, ny_finest_w_ghost, m_nprocs, m_nprocs_2d );
   int is_periodic[2]={0,0};
   // Build the 2D Cartesian communicator; reorder=true lets MPI remap ranks.
   MPI_Cart_create( MPI_COMM_WORLD, 2, m_nprocs_2d, is_periodic, true, &m_cartesian_communicator );
   //   int my_proc_coords[2];
   MPI_Cart_get( m_cartesian_communicator, 2, m_nprocs_2d, is_periodic, m_myrank_2d );
   // Neighbors: m_neighbor[0..1] along dimension 0, m_neighbor[2..3] along dimension 1.
   MPI_Cart_shift( m_cartesian_communicator, 0, 1, m_neighbor, m_neighbor+1 );
   MPI_Cart_shift( m_cartesian_communicator, 1, 1, m_neighbor+2, m_neighbor+3 );
   if( m_myrank == 0 && mVerbose >= 3)
   {
      cout << " Grid distributed on " << m_nprocs << " processors " << endl;
      cout << " Finest grid size    " << nx_finest_w_ghost << " x " << ny_finest_w_ghost << endl;
      cout << " Processor array     " << m_nprocs_2d[0] << " x " << m_nprocs_2d[1] << endl;
   }
   // This rank's index range on the finest grid; shifted so interior starts at 1.
   int ifirst, ilast, jfirst, jlast;
   decomp1d( nx_finest_w_ghost, m_myrank_2d[0], m_nprocs_2d[0], ifirst, ilast );
   decomp1d( ny_finest_w_ghost, m_myrank_2d[1], m_nprocs_2d[1], jfirst, jlast );
   ifirst -= m_ghost_points;
   ilast  -= m_ghost_points;
   jfirst -= m_ghost_points;
   jlast  -= m_ghost_points;
   // Define dimension arrays
   mGridSize.resize(mNumberOfGrids);
   m_global_nx.resize(mNumberOfGrids);
   m_global_ny.resize(mNumberOfGrids);
   m_iStart.resize(mNumberOfGrids);
   m_iEnd.resize(mNumberOfGrids);
   m_jStart.resize(mNumberOfGrids);
   m_jEnd.resize(mNumberOfGrids);
   m_iStartInt.resize(mNumberOfGrids);
   m_iEndInt.resize(mNumberOfGrids);
   m_jStartInt.resize(mNumberOfGrids);
   m_jEndInt.resize(mNumberOfGrids);
   // Compute decomposition of x-y dimensions.
   // NOTE: all grids get the same h and nx/ny here (no mesh refinement in x-y).
   for( int g = 0 ; g < mNumberOfGrids; g++ )
   {
      mGridSize[g]   = m_h_base;
      m_global_nx[g] = m_nx_base;
      m_global_ny[g] = m_ny_base;
      // save the local index bounds
      m_iStart[g] = ifirst;
      m_iEnd[g]   = ilast;
      m_jStart[g] = jfirst;
      m_jEnd[g]   = jlast;
      // local index bounds for interior points (= no ghost or parallel padding points)
      // A side that touches the physical boundary starts at 1 / ends at n;
      // a side that touches a processor boundary excludes the padding layer.
      if (ifirst == 1-m_ghost_points)
         m_iStartInt[g] = 1;
      else
         m_iStartInt[g] = ifirst+m_ppadding;
      if (ilast == m_global_nx[g] + m_ghost_points)
         m_iEndInt[g]   = m_global_nx[g];
      else
         m_iEndInt[g]   = ilast - m_ppadding;
      if (jfirst == 1-m_ghost_points)
         m_jStartInt[g] = 1;
      else
         m_jStartInt[g] = jfirst+m_ppadding;
      if (jlast == m_global_ny[g] + m_ghost_points)
         m_jEndInt[g]   = m_global_ny[g];
      else
         m_jEndInt[g]   = jlast - m_ppadding;
   }
   // Set up arrays of arrays.
   // Materials
   mMu.resize(mNumberOfGrids);
   mLambda.resize(mNumberOfGrids);
   mRho.resize(mNumberOfGrids);
   // Super-grid data
   m_sg_dc_x.resize(mNumberOfGrids);
   m_sg_dc_y.resize(mNumberOfGrids);
   m_sg_dc_z.resize(mNumberOfGrids);
   m_sg_str_x.resize(mNumberOfGrids);
   m_sg_str_y.resize(mNumberOfGrids);
   m_sg_str_z.resize(mNumberOfGrids);
   m_sg_corner_x.resize(mNumberOfGrids);
   m_sg_corner_y.resize(mNumberOfGrids);
   m_sg_corner_z.resize(mNumberOfGrids);
   // Boundary information
   m_onesided.resize(mNumberOfGrids);
   m_bcType.resize(mNumberOfGrids);
   // Default values: no one-sided operators, all sides treated as processor
   // boundaries until assign_local_bcs/default_bcs sets the real conditions.
   for( int g= 0 ;g < mNumberOfGrids ; g++ )
   {
      m_onesided[g] = new int[6];
      m_bcType[g] = new boundaryConditionType[6];
      for( int side =0 ; side < 6 ; side++ )
      {
         m_onesided[g][side] = 0;
         m_bcType[g][side] = bProcessor;
      }
   }
}
//-----------------------------------------------------------------------
void EW::defineDimensionsZ()
{
   // Assumes that topography is known, and computes the z-direction
   // dimensions of arrays.
   // Compute average elevation
   float_sw4 topo_avg=0;
   if( m_topography_exists )
   {
      float_sw4 tzmin, tzmax;
      compute_minmax_topography(tzmin,tzmax);
      topo_avg = 0.5*(tzmin+tzmax); // midpoint of the topography's elevation range
   }
   m_zmin.resize(mNumberOfGrids);
   m_global_nz.resize(mNumberOfGrids);
   // Define m_zmin and m_global_nk.
   // Adjust m_global_zmin and m_global_zmax, if necessary.
   if( m_nz_base > 1 && !m_topography_exists )
   {
      // Flat: one Cartesian grid spanning the whole z-extent.
      m_global_nz[0] = m_nz_base;
      m_zmin[0] = 0;
   }
   else if( m_nz_base > 1 && m_topography_exists )
   {
      // Curvilinear: grid 0 is the Cartesian block below the topography,
      // grid 1 the curvilinear grid following the free surface.
      // Round the point count to the base spacing, then snap m_global_zmax
      // onto the resulting grid so spacing stays exactly m_h_base.
      int nz = static_cast<int>(1 + round((m_global_zmax-m_topo_zmax)/m_h_base));
      m_global_zmax = m_topo_zmax+(nz-1)*m_h_base;
      m_global_nz[0] = nz;
      m_zmin[0] = m_topo_zmax;
      m_global_nz[1] = static_cast<int>(1 + round((m_topo_zmax - topo_avg)/m_h_base));
      // Sentinel value; presumably the real minimum is computed when the
      // curvilinear grid is generated -- TODO confirm.
      m_zmin[1] = 1e38;
   }
   else
      if( m_myrank == 0 )
         cout << "ERROR in defineDimensionsZ, elastic domain could not be defined" << endl;
   // Define local z-dimension arrays.  The z-direction is not decomposed
   // across MPI ranks, so every rank holds the full k-range plus ghosts.
   m_kStart.resize(mNumberOfGrids);
   m_kEnd.resize(mNumberOfGrids);
   m_kStartInt.resize(mNumberOfGrids);
   m_kEndInt.resize(mNumberOfGrids);
   for( int g = 0 ; g < mNumberOfGrids; g++ )
   {
      m_kStart[g] = 1-m_ghost_points;
      m_kEnd[g]   = m_global_nz[g] + m_ghost_points;
      m_kStartInt[g] = 1;
      m_kEndInt[g]   = m_global_nz[g];
   }
   if (mVerbose >= 1 && m_myrank == 0)
      cout << "Extent of the computational domain xmax=" << m_global_xmax << " ymax=" << m_global_ymax <<
         " zmin = " << m_global_zmin << " zmax=" << m_global_zmax << endl;
}
//-----------------------------------------------------------------------
void EW::allocateTopoArrays()
{
if( m_topography_exists )
{
int ifirst = m_iStart[mNumberOfGrids-1];
int ilast = m_iEnd[mNumberOfGrids-1];
int jfirst = m_jStart[mNumberOfGrids-1];
int jlast = m_jEnd[mNumberOfGrids-1];
// Two versions of the topography:
mTopo.define(ifirst,ilast,jfirst,jlast,1,1); // true topography/bathymetry, read directly
// smoothed version of true topography, with an extended number (4 instead of 2 ) of ghost points.
m_ext_ghost_points = 2;
mTopoGridExt.define(ifirst-m_ext_ghost_points,ilast+m_ext_ghost_points,
jfirst-m_ext_ghost_points,jlast+m_ext_ghost_points,1,1);
}
}
//-----------------------------------------------------------------------
void EW::allocateArrays()
{
for( int g=0 ; g < mNumberOfGrids ; g++ )
{
int ifirst = m_iStart[g];
int ilast = m_iEnd[g];
int jfirst = m_jStart[g];
int jlast = m_jEnd[g];
int kfirst = m_kStart[g];
int klast = m_kEnd[g];
// Material data
mMu[g].define(ifirst,ilast,jfirst,jlast,kfirst,klast);
mRho[g].define(ifirst,ilast,jfirst,jlast,kfirst,klast);
mLambda[g].define(ifirst,ilast,jfirst,jlast,kfirst,klast);
// initialize the material coefficients to -1
mMu[g].set_to_minusOne();
mRho[g].set_to_minusOne();
mLambda[g].set_to_minusOne();
// Supergrid arrays
m_sg_dc_x[g] = new float_sw4[ilast-ifirst+1];
m_sg_dc_y[g] = new float_sw4[jlast-jfirst+1];
m_sg_dc_z[g] = new float_sw4[klast-kfirst+1];
m_sg_str_x[g] = new float_sw4[ilast-ifirst+1];
m_sg_str_y[g] = new float_sw4[jlast-jfirst+1];
m_sg_str_z[g] = new float_sw4[klast-kfirst+1];
m_sg_corner_x[g] = new float_sw4[ilast-ifirst+1];
m_sg_corner_y[g] = new float_sw4[jlast-jfirst+1];
m_sg_corner_z[g] = new float_sw4[klast-kfirst+1];
if( m_topography_exists && g == mNumberOfGrids-1 )
{
// Grid and metric
mJ.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
mX.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
mY.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
mZ.define(ifirst,ilast,jfirst,jlast,kfirst,klast);
mMetric.define(4,ifirst,ilast,jfirst,jlast,kfirst,klast);
// Initialization, to touch memory in case OpenMP is in use
mJ.set_to_zero();
mX.set_to_zero();
mY.set_to_zero();
mZ.set_to_zero();
mMetric.set_to_zero();
}
}
}
//-----------------------------------------------------------------------
void EW::printGridSizes() const
{
if (m_myrank == 0)
{
int nx, ny, nz;
float_sw4 nTot=0.;
printf("\nGlobal grid sizes (without ghost points)\n");
printf("Grid h Nx Ny Nz Points\n");
for (int g = 0; g < mNumberOfGrids; g++)
{
nx = m_global_nx[g];
ny = m_global_ny[g];
nz = m_kEnd[g] - m_ghost_points;
nTot += ((long long int)nx)*ny*nz;
printf("%4i %9g %9i %9i %9i %12lld\n", g, mGridSize[g], nx, ny, nz, ((long long int)nx)*ny*nz);
}
printf("Total number of grid points (without ghost points): %g\n\n", nTot);
}
}
//-----------------------------------------------------------------------
bool EW::parseInputFile( const string& filename )
{
   // Parse the input file in two passes:
   //   pass 1: grid-defining commands (grid, developer, topography, fileio),
   //           after which the mesh, MPI communication and arrays are set up;
   //   pass 2: all remaining commands, which require the grid to exist.
   // Returns true on success, false if the file cannot be opened or no grid
   // command is present.  This routine is collective over MPI_COMM_WORLD:
   // every rank parses the file and every rank must take the same return
   // path, since defineDimensionsGXY() etc. contain collective calls.
   char buffer[256];   // NOTE: input lines longer than 255 chars are truncated by getline
   bool foundGrid = false;
   MPI_Barrier(MPI_COMM_WORLD);
   ifstream inputFile;
   inputFile.open(filename.c_str());
   if (!inputFile.is_open())
   {
      if (m_myrank == 0)
         cerr << endl << "ERROR: Failure opening input file: " << filename << endl;
      return false;
   }
   // Pass 1: only commands needed to define the grid.
   while (!inputFile.eof())
   {
      inputFile.getline(buffer, 256);
      if( startswith("grid", buffer) )
      {
         foundGrid = true;
         processGrid(buffer);
      }
      // Need process developer before setupMPICommunication, because of array ordering m_corder
      else if(startswith("developer", buffer))
         processDeveloper(buffer);
      else if (startswith("topography", buffer))
         processTopography(buffer);
      else if( startswith("fileio",buffer))
         processFileIO(buffer);
   }
   if (!foundGrid)
   {
      // BUG FIX: previously 'return false' sat inside the rank-0 branch, so
      // only rank 0 returned while all other ranks fell through into the
      // collective grid setup below, deadlocking the run.  All ranks must
      // fail together; only rank 0 prints the diagnostic.
      if (m_myrank == 0)
         cerr << "ERROR: No grid found in input file: " << filename << endl;
      return false;
   }
   // Set up grid dimensions, topography, decomposition, and work arrays.
   defineDimensionsGXY();
   if( m_topography_exists )
   {
      allocateTopoArrays();
      if( m_topoInputStyle == EW::GaussianHill )
         buildGaussianHillTopography(m_GaussianAmp, m_GaussianLx, m_GaussianLy, m_GaussianXc, m_GaussianYc);
   }
   defineDimensionsZ();
   setupMPICommunications();
   allocateArrays();
   if( m_topography_exists )
   {
      generate_grid();
      setup_metric();
   }
   // output grid size info
   printGridSizes();
   // set default boundary conditions,
   default_bcs();
   // Pass 2: rewind and handle all remaining commands.
   inputFile.clear();
   inputFile.seekg(0, ios::beg); // reset file pointer to the beginning of the input file
   while (!inputFile.eof())
   {
      inputFile.getline(buffer, 256);
      if (strlen(buffer) > 0) // empty lines produce this
      {
         if (startswith("#", buffer) ||
             startswith("grid", buffer) ||
             startswith("developer", buffer) ||
             startswith("topography", buffer) ||
             startswith("fileio", buffer) ||
             startswith("\n", buffer) ||
             startswith("\r", buffer) )
         {
            // Comments and pass-1 commands: already handled, skip silently.
         }
         else if(startswith("time", buffer))
            processTime(buffer);
         else if( startswith("source",buffer))
            processSource(buffer);
         else if( startswith("supergrid",buffer))
            processSuperGrid(buffer);
         else if(startswith("testpointsource", buffer))
            processTestPointSource(buffer);
         else if( startswith("checkpoint",buffer))
            processCheckPoint(buffer);
         else if( startswith("restart",buffer))
            processRestart(buffer);
         else if( startswith("rec",buffer))
            processReceiver(buffer);
         else if( startswith("block",buffer))
            processMaterialBlock(buffer);
         else if (!inputFile.eof() && m_myrank == 0)
         {
            cout << "*** Ignoring command: '" << buffer << "'" << endl;
         }
      }
   }
   inputFile.close();
   if( m_myrank == 0 )
      cout << "Done reading input file " << endl;
   MPI_Barrier(MPI_COMM_WORLD);
   return true;
}
//-----------------------------------------------------------------------
void EW::setupRun()
{
   // One-time initialization performed after input parsing and before the
   // time stepping loop: materials, GPU selection, operators, supergrid,
   // boundary data, time step, sources, and check point I/O.
   // The call order matters (e.g. materials must exist before they are
   // copied to the device, and the GPU must be initialized before any CU
   // routine is called).
   // Assign values to material data arrays mRho,mMu,mLambda
   setup_materials();
   // Check if any GPUs are available
   find_cuda_device( );
   m_cuobj->initialize_gpu(m_myrank);
   // setup coefficients for SBP operators
   setupSBPCoeff();
   // Check that f.d. operators fit inside the domains
   check_dimensions();
   // Initialize IO
   create_output_directory( );
   // Set up supergrid
   setup_supergrid( );
   assign_supergrid_damping_arrays();
   // Copy material to GPU
   copy_material_to_device();
   // B.C. data structures
   assign_local_bcs();
   setup_boundary_arrays();
   // Time step
   computeDT( );
   // Set up sources: discretize each global source onto grid points.
   for( int s=0 ; s < m_globalUniqueSources.size() ; s++)
   {
      m_globalUniqueSources[s]->set_grid_point_sources4( this, m_point_sources );
   }
   // Sorting sources on grid index will allow more efficient parallel code with multi-core
   sort_grid_point_sources();
   if( m_myrank == 0 && m_globalUniqueSources.size() > 0 )
      cout << "setup of sources done" << endl;
   if( m_cuobj->has_gpu() )
   {
      copy_point_sources_to_gpu( );
      init_point_sourcesCU( );
   }
   // Setup I/O in check points (restart file plus any periodic check points)
   if( m_restart_check_point != CheckPoint::nil )
      m_restart_check_point->setup_sizes();
   for( int c = 0 ; c < m_check_points.size() ; c++ )
      m_check_points[c]->setup_sizes();
   if( m_myrank == 0 && (m_restart_check_point != CheckPoint::nil || m_check_points.size() > 0) )
      cout << "setup of check point file done" << endl;
}
//-----------------------------------------------------------------------
void EW::timesteploop( vector<Sarray>& U, vector<Sarray>& Um )
{
// input: U,Um,mMu,mLambda,mRho,
// local arrays: F, Up, Lu, Uacc
vector<Sarray> F, Lu, Uacc, Up;
// Pointer to Sarray on device, not sure if std::vector is available.
Sarray* dev_F, *dev_Um, *dev_U, *dev_Up, *dev_metric, *dev_j;
float_sw4* gridsize_dev;
// Do all timing in double, time differences have to much cancellation for float.
double time_start_solve = MPI_Wtime();
bool saveerror = false;
// Define local arrays
F.resize(mNumberOfGrids);
Lu.resize(mNumberOfGrids);
Uacc.resize(mNumberOfGrids);
Up.resize(mNumberOfGrids);
U.resize(mNumberOfGrids);
Um.resize(mNumberOfGrids);
for( int g=0 ; g < mNumberOfGrids ; g++ )
{
int ifirst = m_iStart[g], ilast = m_iEnd[g];
int jfirst = m_jStart[g], jlast = m_jEnd[g];
int kfirst = m_kStart[g], klast = m_kEnd[g];
F[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
Lu[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
Uacc[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
Up[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
U[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
Um[g].define(3,ifirst,ilast,jfirst,jlast,kfirst,klast);
}
// Set up boundary data array
//vector<float_sw4**> BCForcing;
BCForcing.resize(mNumberOfGrids);
for( int g = 0; g <mNumberOfGrids; g++ )
{
BCForcing[g] = new float_sw4*[6];
for (int side=0; side < 6; side++)
{
BCForcing[g][side]=NULL;
if (m_bcType[g][side] == bStressFree || m_bcType[g][side] == bDirichlet || m_bcType[g][side] == bSuperGrid)
{
BCForcing[g][side] = new float_sw4[3*m_NumberOfBCPoints[g][side]];
}
}
}
// Initial data, touch all memory even in
// arrays that do not need values, in order
// to initialize OpenMP with good memory access
for( int g=0 ; g < mNumberOfGrids ; g++ )
{
U[g].set_value(0.0);
Um[g].set_value(0.0);
F[g].set_value(0.0);
Up[g].set_value(0.0);
Uacc[g].set_value(0.0);
Lu[g].set_value(0.0);
}
int beginCycle = 0;
float_sw4 t = mTstart;
if( m_restart_check_point != CheckPoint::nil )
{
m_restart_check_point->read_checkpoint( t, beginCycle, Um, U );
for(int g=0 ; g < mNumberOfGrids ; g++ )
{
communicate_array( U[g], g );
communicate_array( Um[g], g );
}
cartesian_bc_forcing( t, BCForcing, m_globalUniqueSources );
enforceBC( U, mMu, mLambda, t, BCForcing );
cartesian_bc_forcing( t-mDt, BCForcing, m_globalUniqueSources );
enforceBC( Um, mMu, mLambda, t-mDt, BCForcing );
}
beginCycle++;
copy_bcforcing_arrays_to_device();
copy_bctype_arrays_to_device();
copy_bndrywindow_arrays_to_device();
double time_measure[20];
double time_sum[20]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
m_GlobalTimeSeries[ts]->allocateRecordingArrays( mNumberOfTimeSteps+1, mTstart, mDt);
if (m_myrank == 0)
{
cout << "Running on " << m_nprocs << " MPI tasks" << endl;
}
#ifdef SW4_OPENMP
#pragma omp parallel
{
if( omp_get_thread_num() == 0 && m_myrank == 0 )
{
int nth=omp_get_num_threads();
cout << "Using OpenMP with " << nth << " thread";
if( nth > 1 )
cout << "s";
cout << " per MPI task" << endl;
}
}
#endif
hipMalloc( (void**)&dev_F, sizeof(Sarray)*mNumberOfGrids);
hipMalloc( (void**)&dev_Um, sizeof(Sarray)*mNumberOfGrids);
hipMalloc( (void**)&dev_U, sizeof(Sarray)*mNumberOfGrids);
hipMalloc( (void**)&dev_Up, sizeof(Sarray)*mNumberOfGrids);
hipMalloc( (void**)&dev_metric, sizeof(Sarray));
hipMalloc( (void**)&dev_j, sizeof(Sarray));
hipMalloc( (void**)&gridsize_dev, sizeof(float_sw4)*mNumberOfGrids);
for( int g=0 ; g < mNumberOfGrids ; g++ )
{
Lu[g].copy_to_device(m_cuobj);
Up[g].copy_to_device(m_cuobj);
Um[g].copy_to_device(m_cuobj);
U[g].copy_to_device(m_cuobj);
Uacc[g].copy_to_device(m_cuobj);
F[g].copy_to_device(m_cuobj);
F[g].page_lock(m_cuobj);
U[g].page_lock(m_cuobj);
Um[g].page_lock(m_cuobj);
Up[g].page_lock(m_cuobj);
}
hipMemcpy( dev_F, &F[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
hipMemcpy( dev_Um, &Um[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
hipMemcpy( dev_U, &U[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
hipMemcpy( dev_Up, &Up[0], mNumberOfGrids*sizeof(Sarray), hipMemcpyHostToDevice );
hipMemcpy( dev_metric, &mMetric, sizeof(Sarray), hipMemcpyHostToDevice );
hipMemcpy( dev_j, &mJ, sizeof(Sarray), hipMemcpyHostToDevice );
hipMemcpy( gridsize_dev, &mGridSize[0], sizeof(float_sw4)*mNumberOfGrids, hipMemcpyHostToDevice );
// save initial data on receiver records
vector<float_sw4> uRec;
for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
{
// can't compute a 2nd order accurate time derivative at this point
// therefore, don't record anything related to velocities for the initial data
if (m_GlobalTimeSeries[ts]->getMode() != TimeSeries::Velocity && m_GlobalTimeSeries[ts]->myPoint())
{
int i0 = m_GlobalTimeSeries[ts]->m_i0;
int j0 = m_GlobalTimeSeries[ts]->m_j0;
int k0 = m_GlobalTimeSeries[ts]->m_k0;
int grid0 = m_GlobalTimeSeries[ts]->m_grid0;
extractRecordData(m_GlobalTimeSeries[ts]->getMode(), i0, j0, k0, grid0,
uRec, Um, U);
m_GlobalTimeSeries[ts]->recordData(uRec);
}
}
// Build TimeSeries help data structure for GPU
int* i0dev, *j0dev, *k0dev, *g0dev;
int* modedev;
float_sw4** urec_dev; // array of pointers on device pointing to device memory
float_sw4** urec_host; // array of pointers on host pointing to host memory
float_sw4** urec_hdev; // array of pointers on host pointing to device memory
int nvals=0, ntloc=0;
allocateTimeSeriesOnDeviceCU( nvals, ntloc, i0dev, j0dev, k0dev, g0dev, modedev, urec_dev, urec_host, urec_hdev );
if( m_myrank == 0 )
cout << "starting at time " << t << " at cycle " << beginCycle << endl;
double* trdata;
if( m_save_trace )
{
trdata = new double[12*(mNumberOfTimeSteps+1)];
MPI_Barrier(m_cartesian_communicator);
}
// Set up the array for data communication
setup_device_communication_array();
// Begin time stepping loop
for( int currentTimeStep = beginCycle; currentTimeStep <= mNumberOfTimeSteps; currentTimeStep++ )
{
time_measure[0] = MPI_Wtime();
// all types of forcing...
if( m_cuobj->has_gpu() )
ForceCU( t, dev_F, false, 0 );
else
Force( t, F, m_point_sources, false );
if( m_checkfornan )
{
check_for_nan_GPU( F, 1, "F" );
check_for_nan_GPU( U, 1, "U" );
}
time_measure[1] = MPI_Wtime();
// evaluate right hand side
if( m_cuobj->has_gpu() )
{
// evalRHSCU( U, mMu, mLambda, Lu, 0 ); // save Lu in composite grid 'Lu'
// RHS + predictor in the rest (stream 0)
RHSPredCU_boundary (Up, U, Um, mMu, mLambda, mRho, F, 0);
// Wait for stream 0 to complete
m_cuobj->sync_stream(0);
RHSPredCU_center (Up, U, Um, mMu, mLambda, mRho, F, 1);
}
else
evalRHS( U, mMu, mLambda, Lu ); // save Lu in composite grid 'Lu'
if( m_checkfornan )
check_for_nan_GPU( Lu, 1, "Lu pred. " );
// take predictor step, store in Up
m_cuobj->sync_stream( 0 );
//predictor is merged into RHSPredCU_*
if( ! m_cuobj->has_gpu() )
evalPredictor( Up, U, Um, mRho, Lu, F );
time_measure[2] = MPI_Wtime();
// communicate across processor boundaries
if( m_cuobj->has_gpu() )
{
for(int g=0 ; g < mNumberOfGrids ; g++ )
{
//communicate_arrayCU( Up[g], g, 0);
pack_HaloArrayCU_X (Up[g], g, 0);
communicate_arrayCU_X( Up[g], g, 0);
unpack_HaloArrayCU_X (Up[g], g, 0);
pack_HaloArrayCU_Y (Up[g], g, 0);
communicate_arrayCU_Y( Up[g], g, 0);
unpack_HaloArrayCU_Y (Up[g], g, 0);
}
hipDeviceSynchronize();
}
else
{
for(int g=0 ; g < mNumberOfGrids ; g++ )
communicate_array( Up[g], g );
}
time_measure[3] = MPI_Wtime();
// calculate boundary forcing at time t+mDt
if( m_cuobj->has_gpu() )
{
cartesian_bc_forcingCU( t+mDt, BCForcing, m_globalUniqueSources,0);
enforceBCCU( Up, mMu, mLambda, t+mDt, BCForcing, 0);
}
else
{
cartesian_bc_forcing( t+mDt, BCForcing, m_globalUniqueSources );
enforceBC( Up, mMu, mLambda, t+mDt, BCForcing );
}
if( m_checkfornan )
check_for_nan( Up, 1, "U pred. " );
time_measure[4] = MPI_Wtime();
// Corrector
if( m_cuobj->has_gpu() )
{
ForceCU( t, dev_F, true, 0 );
hipDeviceSynchronize();
}
else
Force( t, F, m_point_sources, true );
time_measure[5] = MPI_Wtime();
if( m_cuobj->has_gpu() )
evalDpDmInTimeCU( Up, U, Um, Uacc, 0 ); // store result in Uacc
else
evalDpDmInTime( Up, U, Um, Uacc ); // store result in Uacc
if( m_checkfornan )
check_for_nan_GPU( Uacc, 1, "uacc " );
if( m_cuobj->has_gpu() )
{
// RHS + corrector in the free surface and halos (stream 0)
RHSCorrCU_boundary (Up, Uacc, mMu, mLambda, mRho, F, 0);
// Add super grid damping terms in the free surface and halos (stream 0)
addSuperGridDampingCU_upper_boundary (Up, U, Um, mRho, 0);
// Wait for stream 0 to complete
m_cuobj->sync_stream(0);
RHSCorrCU_center (Up, Uacc, mMu, mLambda, mRho, F, 1);
}
else
evalRHS( Uacc, mMu, mLambda, Lu );
if( m_checkfornan )
check_for_nan_GPU( Lu, 1, "L(uacc) " );
//corrector is merged into RHSCorrCU_*
if( !m_cuobj->has_gpu() )
evalCorrector( Up, mRho, Lu, F );
time_measure[6] = MPI_Wtime();
// add in super-grid damping terms
if ( m_use_supergrid )
{
if( m_cuobj->has_gpu() )
{
// addSuperGridDampingCU( Up, U, Um, mRho, 0 );
// Add super grid damping terms in the rest of the cube (stream 1)
addSuperGridDampingCU_center (Up, U, Um, mRho, 1);
// Add super grid damping terms in the rest of the cube (stream 1)
m_cuobj->sync_stream(1);
}
else
addSuperGridDamping( Up, U, Um, mRho );
}
time_measure[7] = MPI_Wtime();
// also check out EW::update_all_boundaries
// communicate across processor boundaries
if( m_cuobj->has_gpu() )
for(int g=0 ; g < mNumberOfGrids ; g++ )
{
pack_HaloArrayCU_X (Up[g], g, 0);
communicate_arrayCU_X( Up[g], g, 0 );
unpack_HaloArrayCU_X (Up[g], g, 0);
pack_HaloArrayCU_Y (Up[g], g, 0);
communicate_arrayCU_Y( Up[g], g, 0 );
unpack_HaloArrayCU_Y (Up[g], g, 0);
}
else
for(int g=0 ; g < mNumberOfGrids ; g++ )
communicate_array( Up[g], g );
time_measure[8] = MPI_Wtime();
// calculate boundary forcing at time t+mDt (do we really need to call this fcn again???)
if( m_cuobj->has_gpu() )
{
cartesian_bc_forcingCU( t+mDt, BCForcing, m_globalUniqueSources, 0 );
enforceBCCU( Up, mMu, mLambda, t+mDt, BCForcing, 0 );
}
else
{
cartesian_bc_forcing( t+mDt, BCForcing, m_globalUniqueSources );
enforceBC( Up, mMu, mLambda, t+mDt, BCForcing );
}
if( m_checkfornan )
check_for_nan( Up, 1, "Up" );
// increment time
t += mDt;
time_measure[9] = MPI_Wtime();
// periodically, print time stepping info to stdout
printTime( currentTimeStep, t, currentTimeStep == mNumberOfTimeSteps );
// Images have to be written before the solution arrays are cycled, because both Up and Um are needed
// to compute a centered time derivative
//
m_cuobj->sync_stream(0);
double time_chkpt, time_chkpt_tmp;
bool wrote=false;
time_chkpt=MPI_Wtime();
for( int c=0 ; c < m_check_points.size() ; c++ )
if( m_check_points[c]->timeToWrite( t, currentTimeStep, mDt) )
{
for( int g=0 ; g < mNumberOfGrids ; g++ )
{
U[g].copy_from_device(m_cuobj,true,0);
Up[g].copy_from_device(m_cuobj,true,1);
}
hipDeviceSynchronize();
m_check_points[c]->write_checkpoint( t, currentTimeStep, U, Up );
wrote=true;
}
if( wrote )
{
time_chkpt_tmp =MPI_Wtime()-time_chkpt;
MPI_Allreduce( &time_chkpt_tmp, &time_chkpt, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD );
if( m_myrank == 0 )
cout << "Cpu time to write check point file " << time_chkpt << " seconds " << endl;
}
// save the current solution on receiver records (time-derivative require Up and Um for a 2nd order
// approximation, so do this before cycling the arrays)
if( m_cuobj->has_gpu() )
{
if( ntloc > 0 )
{
extractRecordDataCU( ntloc, modedev, i0dev, j0dev, k0dev, g0dev, urec_dev, dev_Um, dev_Up,
mDt, gridsize_dev, dev_metric, dev_j, 0, nvals, urec_host[0], urec_hdev[0] );
// Note: extractRecordDataCU performs hipMemcpy of dev data to host, no explicit synchronization needed.
int tsnr=0;
for( int ts=0 ; ts < m_GlobalTimeSeries.size() ; ts++ )
if( m_GlobalTimeSeries[ts]->myPoint() )
m_GlobalTimeSeries[ts]->recordData(urec_host[tsnr++]);
}
}
else
{
for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
{
if (m_GlobalTimeSeries[ts]->myPoint())
{
int i0 = m_GlobalTimeSeries[ts]->m_i0;
int j0 = m_GlobalTimeSeries[ts]->m_j0;
int k0 = m_GlobalTimeSeries[ts]->m_k0;
int grid0 = m_GlobalTimeSeries[ts]->m_grid0;
//
// note that the solution on the new time step is in Up
// also note that all quantities related to velocities lag by one time step; they are not
// saved before the time stepping loop started
extractRecordData(m_GlobalTimeSeries[ts]->getMode(), i0, j0, k0, grid0, uRec, Um, Up);
m_GlobalTimeSeries[ts]->recordData(uRec);
}
}
}
// // Energy evaluation, requires all three time levels present, do before cycle arrays.
// if( m_energy_test )
// compute_energy( mDt, currentTimeStep == mNumberOfTimeSteps, Um, U, Up, currentTimeStep );
// cycle the solution arrays
cycleSolutionArrays(Um, U, Up, dev_Um, dev_U, dev_Up );
// time_measure[8] = MPI_Wtime();
time_measure[10] = MPI_Wtime();
// evaluate error for some test cases
// if (m_lamb_test || m_point_source_test || m_rayleigh_wave_test )
if ( m_point_source_test && saveerror )
{
float_sw4 errInf=0, errL2=0, solInf=0; //, solL2=0;
exactSol( t, Up, m_globalUniqueSources ); // store exact solution in Up
// // if (m_lamb_test)
// // normOfSurfaceDifference( Up, U, errInf, errL2, solInf, solL2, a_Sources);
normOfDifference( Up, U, errInf, errL2, solInf, m_globalUniqueSources );
if ( m_myrank == 0 )
cout << t << " " << errInf << " " << errL2 << " " << solInf << endl;
}
// time_measure[9] = MPI_Wtime();
time_measure[11] = MPI_Wtime();
// // See if it is time to write a restart file
// // if (mRestartDumpInterval > 0 && currentTimeStep % mRestartDumpInterval == 0)
// // serialize(currentTimeStep, U, Um);
if( currentTimeStep > 1 )
{
time_sum[0] += time_measure[1]-time_measure[0] + time_measure[5]-time_measure[4]; // F
time_sum[1] += time_measure[2]-time_measure[1] + time_measure[6]-time_measure[5]; // RHS
time_sum[2] += time_measure[3]-time_measure[2] + time_measure[8]-time_measure[7]; // bc comm.
time_sum[3] += time_measure[4]-time_measure[3] + time_measure[9]-time_measure[8]; // bc phys.
time_sum[4] += time_measure[7]-time_measure[6]; // super grid damping
time_sum[5] += time_measure[10]-time_measure[9]; // print outs
time_sum[6] += time_measure[11]-time_measure[10]; // compute exact solution
time_sum[7] += time_measure[11]-time_measure[0]; // total measured
}
if( m_save_trace )
for( int s = 0 ; s < 12 ; s++ )
trdata[s+12*(currentTimeStep-beginCycle)]= time_measure[s];
} // end time stepping loop
double time_end_solve = MPI_Wtime();
print_execution_time( time_start_solve, time_end_solve, "solver phase" );
if( m_output_detailed_timing )
print_execution_times( time_sum );
if ( m_point_source_test )
{
if( m_cuobj->has_gpu() )
for( int g=0; g < mNumberOfGrids ; g++ )
U[g].copy_from_device(m_cuobj,true,0);
float_sw4 errInf=0, errL2=0, solInf=0;//, solL2=0;
exactSol( t, Up, m_globalUniqueSources ); // store exact solution in Up
// // if (m_lamb_test)
// // normOfSurfaceDifference( Up, U, errInf, errL2, solInf, solL2, a_Sources);
normOfDifference( Up, U, errInf, errL2, solInf, m_globalUniqueSources );
if ( m_myrank == 0 )
{
cout << "Errors at time " << t << " Linf = " << errInf << " L2 = " << errL2 << " norm of solution = " << solInf << endl;
string fname = mPath+"PointSourceErr.txt";
ofstream esave(fname.c_str());
esave.precision(12);
esave << t << " " << errInf << " " << errL2 << " " << solInf << endl;
esave.close();
}
}
for (int ts=0; ts<m_GlobalTimeSeries.size(); ts++)
m_GlobalTimeSeries[ts]->writeFile();
for( int g= 0 ; g < mNumberOfGrids ; g++ )
{
F[g].page_unlock(m_cuobj);
U[g].page_unlock(m_cuobj);
Um[g].page_unlock(m_cuobj);
Up[g].page_unlock(m_cuobj);
}
m_cuobj->reset_gpu();
if( m_save_trace )
{
char fname[255];
snprintf(fname,255,"%s/trfile%04d.bin",mPath.c_str(),m_myrank);
int fd = open(fname, O_WRONLY|O_TRUNC|O_CREAT, 0660);
int twelve=12;
int nsteps= mNumberOfTimeSteps-beginCycle+1;
size_t nr=write(fd,&twelve,sizeof(int));
nr=write(fd,&nsteps,sizeof(int));
nr=write(fd,trdata,sizeof(double)*twelve*nsteps);
close(fd);
}
}
//-----------------------------------------------------------------------
bool EW::proc_decompose_2d( int ni, int nj, int nproc, int proc_max[2] )
{
// This routine determines a decomposition of nproc processors into
// a 2D processor array proc_max[0] x proc_max[1], which gives minimal
// communication boundary for a grid with ni x nj points.
float_sw4 fmin = ni+nj;
bool first = true;
int p1max = ni/m_ppadding;
int p2max = nj/m_ppadding;
for( int p1 = 1 ; p1 <= nproc; p1++)
if( nproc%p1 == 0 )
{
int p2 = nproc/p1;
if( p1 <= p1max && p2 <= p2max )
{
// try to make each subdomain as square as possible
float_sw4 f = fabs((float_sw4)(ni)/p1 - (float_sw4)(nj)/p2);
if( f < fmin || first )
{
fmin = f;
proc_max[0] = p1;
proc_max[1] = p2;
first= false;
}
}
}
return !first;
}
//-----------------------------------------------------------------------
void EW::decomp1d( int nglobal, int myid, int nproc, int& s, int& e )
//
// Split the index range 1 <= i <= nglobal into nproc blocks that overlap
// by 2*m_ppadding points.  Returns the start (s) and end (e) indices of
// block number myid, where 0 <= myid <= nproc-1.
//
{
   const int olap   = 2*m_ppadding;
   const int padded = nglobal + (nproc-1)*olap;
   int nlocal        = padded / nproc;
   const int deficit = padded % nproc;
   // the first 'deficit' ranks carry one extra point each
   if( myid < deficit )
   {
      s = myid*(nlocal-olap) + myid + 1;
      nlocal++;
   }
   else
   {
      s = myid*(nlocal-olap) + deficit + 1;
   }
   e = s + nlocal - 1;
}
//-----------------------------------------------------------------------
void EW::setupMPICommunications()
{
   // Build the MPI derived datatypes used to exchange ghost (padding) planes
   // with the four side neighbors of each grid.  For grid g, index [2*g]
   // holds the i-direction (x-side) type and [2*g+1] the j-direction
   // (y-side) type.  The suffix 1/3/4 is the number of solution components.
   if (mVerbose >= 1 && m_myrank == 0 )
      cout << "***inside setupMPICommunications***"<< endl;
   // Define MPI datatypes for communication across processor boundaries
   m_send_type1.resize(2*mNumberOfGrids);
   m_send_type3.resize(2*mNumberOfGrids);
   m_send_type4.resize(2*mNumberOfGrids);
   // m_send_type21.resize(2*mNumberOfGrids);
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
   {
      // int ni = mU[g].m_ni, nj=mU[g].m_nj, nk=mU[g].m_nk;
      // local array dimensions on this grid, including padding points
      int ni = m_iEnd[g] - m_iStart[g] + 1;
      int nj = m_jEnd[g] - m_jStart[g] + 1;
      int nk = m_kEnd[g] - m_kStart[g] + 1;
      // x-side: nj*nk strips of m_ppadding points, stride ni (one i-row);
      // y-side: nk slabs of m_ppadding*ni points, stride ni*nj (one k-plane)
      MPI_Type_vector( nj*nk, m_ppadding, ni, m_mpifloat, &m_send_type1[2*g] );
      MPI_Type_vector( nk, m_ppadding*ni, ni*nj, m_mpifloat, &m_send_type1[2*g+1] );
      if( m_corder )
      {
         // component index varies slowest: replicate the scalar pattern
         // 3 (or 4) times along the outer (count) dimension
	 MPI_Type_vector( 3*nj*nk, m_ppadding, ni, m_mpifloat, &m_send_type3[2*g] );
	 MPI_Type_vector( 3*nk, m_ppadding*ni, ni*nj, m_mpifloat, &m_send_type3[2*g+1] );
	 MPI_Type_vector( 4*nj*nk, m_ppadding, ni, m_mpifloat, &m_send_type4[2*g] );
	 MPI_Type_vector( 4*nk, m_ppadding*ni, ni*nj, m_mpifloat, &m_send_type4[2*g+1] );
      }
      else
      {
         // component index varies fastest: widen blocklength and stride
         // by the number of components instead
	 MPI_Type_vector( nj*nk, 3*m_ppadding, 3*ni, m_mpifloat, &m_send_type3[2*g] );
	 MPI_Type_vector( nk, 3*m_ppadding*ni, 3*ni*nj, m_mpifloat, &m_send_type3[2*g+1] );
	 MPI_Type_vector( nj*nk, 4*m_ppadding, 4*ni, m_mpifloat, &m_send_type4[2*g] );
	 MPI_Type_vector( nk, 4*m_ppadding*ni, 4*ni*nj, m_mpifloat, &m_send_type4[2*g+1] );
      }
      // commit all six types so they can be used in MPI_Sendrecv
      MPI_Type_commit( &m_send_type1[2*g] );
      MPI_Type_commit( &m_send_type1[2*g+1] );
      MPI_Type_commit( &m_send_type3[2*g] );
      MPI_Type_commit( &m_send_type3[2*g+1] );
      MPI_Type_commit( &m_send_type4[2*g] );
      MPI_Type_commit( &m_send_type4[2*g+1] );
   }
}
//-----------------------------------------------------------------------
bool EW::check_for_nan( vector<Sarray>& a_U, int verbose, string name )
{
   // Scan all grids of a_U for NaN entries.  Returns true if any grid
   // contains a NaN; with verbose==1, reports the count and the location
   // of the first NaN found on each affected grid.
   bool found_nan = false;
   for( int g=0 ; g<mNumberOfGrids; g++ )
   {
      size_t nnans = a_U[g].count_nans();
      if( nnans > 0 )
      {
         found_nan = true;
         if( verbose == 1 )
         {
            int cnan, inan, jnan, knan;
            a_U[g].count_nans(cnan,inan,jnan,knan);
            cout << "grid " << g << " array " << name << " found " << nnans << " nans. First nan at " <<
               cnan << " " << inan << " " << jnan << " " << knan << endl;
         }
      }
   }
   return found_nan;
}
//-----------------------------------------------------------------------
void EW::cycleSolutionArrays(vector<Sarray> & a_Um, vector<Sarray> & a_U,
			     vector<Sarray> & a_Up, Sarray*& dev_Um,
			     Sarray*& dev_U, Sarray*& dev_Up )
{
   // Rotate the three time levels after a step: Um <- U, U <- Up, and the
   // storage previously held by Um is recycled as the new Up.  Done by
   // swapping data pointers only; no array data is copied.
   for (int g=0; g<mNumberOfGrids; g++)
   {
      float_sw4* oldest = a_Um[g].c_ptr();
      a_Um[g].reference( a_U[g].c_ptr() );
      a_U[g].reference( a_Up[g].c_ptr() );
      a_Up[g].reference( oldest );
      if( m_cuobj->has_gpu() )
      {
         // apply the same rotation to the device-side data pointers
         oldest = a_Um[g].dev_ptr();
         a_Um[g].reference_dev( a_U[g].dev_ptr() );
         a_U[g].reference_dev( a_Up[g].dev_ptr() );
         a_Up[g].reference_dev( oldest );
      }
   }
   // rotate the device-resident Sarray descriptor pointers as well
   Sarray* oldest_dev = dev_Um;
   dev_Um = dev_U;
   dev_U  = dev_Up;
   dev_Up = oldest_dev;
}
//-----------------------------------------------------------------------
void EW::Force(float_sw4 a_t, vector<Sarray> & a_F, vector<GridPointSource*> point_sources,
	       bool tt )
{
   // Accumulate the point-source forcing (or its second time derivative
   // when tt==true) at time a_t into a_F, which is zeroed first.
   // m_identsources partitions point_sources into runs sharing the same
   // (grid,i,j,k) location, so each OpenMP iteration writes to a distinct
   // grid point and the parallel loop is race free.
   for( int g =0 ; g < mNumberOfGrids ; g++ )
      a_F[g].set_to_zero();
   // Use a signed trip count: with an empty m_identsources, the original
   // comparison r < m_identsources.size()-1 underflowed to a huge unsigned
   // value; OpenMP also wants a plain signed loop condition.
   const int nruns = static_cast<int>(m_identsources.size()) - 1;
#pragma omp parallel for
   for( int r=0 ; r<nruns ; r++ )
   {
      int s0 = m_identsources[r];
      // all sources in run r share one location; look it up once
      int g = point_sources[s0]->m_grid;
      int i = point_sources[s0]->m_i0;
      int j = point_sources[s0]->m_j0;
      int k = point_sources[s0]->m_k0;
      size_t ind1 = a_F[g].index(1,i,j,k);
      size_t oc = a_F[g].m_offc;      // stride between solution components
      float_sw4* fptr =a_F[g].c_ptr();
      for( int s=m_identsources[r]; s< m_identsources[r+1] ; s++ )
      {
	 float_sw4 fxyz[3];
	 if( tt )
	    point_sources[s]->getFxyztt(a_t,fxyz);
	 else
	    point_sources[s]->getFxyz(a_t,fxyz);
	 fptr[ind1]      += fxyz[0];
	 fptr[ind1+oc]   += fxyz[1];
	 fptr[ind1+2*oc] += fxyz[2];
      }
   }
}
//---------------------------------------------------------------------------
void EW::evalPredictor(vector<Sarray> & a_Up, vector<Sarray> & a_U, vector<Sarray> & a_Um,
		       vector<Sarray>& a_Rho, vector<Sarray> & a_Lu, vector<Sarray> & a_F )
{
   // Predictor step of the time integrator on every grid, delegated to the
   // predfort kernel with the local index bounds and dt^2.
   const float_sw4 dtSquared = mDt*mDt;
   for( int grid = 0 ; grid < mNumberOfGrids ; grid++ )
      predfort( m_iStart[grid], m_iEnd[grid], m_jStart[grid], m_jEnd[grid], m_kStart[grid], m_kEnd[grid],
		a_Up[grid].c_ptr(), a_U[grid].c_ptr(), a_Um[grid].c_ptr(),
		a_Lu[grid].c_ptr(), a_F[grid].c_ptr(), a_Rho[grid].c_ptr(), dtSquared );
}
//---------------------------------------------------------------------------
void EW::evalCorrector(vector<Sarray> & a_Up, vector<Sarray>& a_Rho,
		       vector<Sarray> & a_Lu, vector<Sarray> & a_F )
{
   // Corrector step (4th-order term) on every grid, delegated to the
   // corrfort kernel with the local index bounds and dt^4.
   const float_sw4 dtFourth = mDt*mDt*mDt*mDt;
   for( int grid = 0 ; grid < mNumberOfGrids ; grid++ )
      corrfort( m_iStart[grid], m_iEnd[grid], m_jStart[grid], m_jEnd[grid], m_kStart[grid], m_kEnd[grid],
		a_Up[grid].c_ptr(), a_Lu[grid].c_ptr(), a_F[grid].c_ptr(), a_Rho[grid].c_ptr(), dtFourth );
}
//---------------------------------------------------------------------------
void EW::evalDpDmInTime(vector<Sarray> & a_Up, vector<Sarray> & a_U, vector<Sarray> & a_Um,
			vector<Sarray> & a_Uacc )
{
   // Second divided difference in time, (Up - 2U + Um)/dt^2, stored in
   // a_Uacc; evaluated by the dpdmtfort kernel on each grid.
   const float_sw4 invDt2 = 1./(mDt*mDt);
   for( int grid = 0 ; grid < mNumberOfGrids ; grid++ )
      dpdmtfort( m_iStart[grid], m_iEnd[grid], m_jStart[grid], m_jEnd[grid], m_kStart[grid], m_kEnd[grid],
		 a_Up[grid].c_ptr(), a_U[grid].c_ptr(), a_Um[grid].c_ptr(), a_Uacc[grid].c_ptr(), invDt2 );
}
//-----------------------------------------------------------------------
void EW::evalRHS(vector<Sarray> & a_U, vector<Sarray>& a_Mu, vector<Sarray>& a_Lambda,
		 vector<Sarray> & a_Uacc )
{
   // Evaluate the elastic spatial operator applied to a_U and store the
   // result in a_Uacc, grid by grid.  Cartesian grids use the rhs4sg
   // kernels; a top curvilinear grid (if topography exists) uses the
   // rhs4sgcurv kernels with the metric (mMetric) and Jacobian (mJ).
   // The _rev kernel variants are for m_corder (component-fastest) layout.
   for(int g=0 ; g<mNumberOfCartesianGrids; g++ )
   {
      if( m_corder )
	 rhs4sg_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
		     m_kStart[g], m_kEnd[g], m_global_nz[g], m_onesided[g],
		     m_acof, m_bope, m_ghcof, a_Uacc[g].c_ptr(), a_U[g].c_ptr(),
		     a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), mGridSize[g],
		     m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g] );
      else
	 rhs4sg( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
		 m_kStart[g], m_kEnd[g], m_global_nz[g], m_onesided[g],
		 m_acof, m_bope, m_ghcof, a_Uacc[g].c_ptr(), a_U[g].c_ptr(),
		 a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), mGridSize[g],
		 m_sg_str_x[g], m_sg_str_y[g], m_sg_str_z[g] );
#ifdef DEBUG_CUDA
      // debug dump of kernel arguments and a sample of each input array
      printf("params = %d, %d, %d, %d, %d, %d \n %f, %f, %f, %f \n %f, %f, %f, %f \n %d \n",
             m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g],
             m_kStart[g], m_kEnd[g],
             (a_Uacc[g].c_ptr())[1], (a_U[g].c_ptr())[1],
             (a_Mu[g].c_ptr())[1], (a_Lambda[g].c_ptr())[1],
             mGridSize[g], m_sg_str_x[g][1], m_sg_str_y[g][1], m_sg_str_z[g][1],
             m_ghost_points);
      printf("onesided[%d](4,5) = %d, %d\n", g, m_onesided[g][4], m_onesided[g][5]);
#endif
   }
   if( m_topography_exists )
   {
      // the topmost grid is curvilinear; it needs metric terms
      int g=mNumberOfGrids-1;
      if( m_corder )
	 rhs4sgcurv_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			 a_U[g].c_ptr(), a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), mMetric.c_ptr(),
			 mJ.c_ptr(), a_Uacc[g].c_ptr(), m_onesided[g], m_acof, m_bope, m_ghcof,
			 m_sg_str_x[g], m_sg_str_y[g] );
      else
	 rhs4sgcurv( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
		     a_U[g].c_ptr(), a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(), mMetric.c_ptr(),
		     mJ.c_ptr(), a_Uacc[g].c_ptr(), m_onesided[g], m_acof, m_bope, m_ghcof,
		     m_sg_str_x[g], m_sg_str_y[g] );
   }
}
//-----------------------------------------------------------------------
void EW::communicate_array( Sarray& u, int grid )
{
   // Exchange ghost (padding) planes of u with the four side neighbors of
   // this rank in the 2D processor decomposition, using the derived MPI
   // datatypes built in setupMPICommunications().  m_neighbor[0..3] hold
   // the ranks at low-i, high-i, low-j, high-j; at physical boundaries the
   // neighbor is presumably MPI_PROC_NULL, making that transfer a no-op
   // (NOTE(review): confirm against the cartesian communicator setup).
   // Each MPI_Sendrecv pairs a send of the outermost interior planes with
   // a receive into the ghost planes on the opposite side.
   REQUIRE2( u.m_nc == 1 || u.m_nc == 3 || u.m_nc == 4,
	     "Communicate array, only implemented for nc=1,3, and 4 "
	     << " nc = " << u.m_nc );
   int ie = u.m_ie, ib=u.m_ib, je=u.m_je, jb=u.m_jb, kb=u.m_kb;//,ke=u.m_ke;
   MPI_Status status;
   if( u.m_nc == 1 )
   {
      // scalar array: use the m_send_type1 datatypes
      int xtag1 = 345;
      int xtag2 = 346;
      int ytag1 = 347;
      int ytag2 = 348;
      // X-direction communication
      MPI_Sendrecv( &u(ie-(2*m_ppadding-1),jb,kb), 1, m_send_type1[2*grid], m_neighbor[1], xtag1,
		    &u(ib,jb,kb), 1, m_send_type1[2*grid], m_neighbor[0], xtag1,
		    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(ib+m_ppadding,jb,kb), 1, m_send_type1[2*grid], m_neighbor[0], xtag2,
		    &u(ie-(m_ppadding-1),jb,kb), 1, m_send_type1[2*grid], m_neighbor[1], xtag2,
		    m_cartesian_communicator, &status );
      // Y-direction communication
      MPI_Sendrecv( &u(ib,je-(2*m_ppadding-1),kb), 1, m_send_type1[2*grid+1], m_neighbor[3], ytag1,
		    &u(ib,jb,kb), 1, m_send_type1[2*grid+1], m_neighbor[2], ytag1,
		    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(ib,jb+m_ppadding,kb), 1, m_send_type1[2*grid+1], m_neighbor[2], ytag2,
		    &u(ib,je-(m_ppadding-1),kb), 1, m_send_type1[2*grid+1], m_neighbor[3], ytag2,
		    m_cartesian_communicator, &status );
   }
   else if( u.m_nc == 3 )
   {
      // 3-component array: same pattern with the m_send_type3 datatypes;
      // the extra leading index 1 addresses the first component
      int xtag1 = 345;
      int xtag2 = 346;
      int ytag1 = 347;
      int ytag2 = 348;
      // X-direction communication
      MPI_Sendrecv( &u(1,ie-(2*m_ppadding-1),jb,kb), 1, m_send_type3[2*grid], m_neighbor[1], xtag1,
		    &u(1,ib,jb,kb), 1, m_send_type3[2*grid], m_neighbor[0], xtag1,
		    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib+m_ppadding,jb,kb), 1, m_send_type3[2*grid], m_neighbor[0], xtag2,
		    &u(1,ie-(m_ppadding-1),jb,kb), 1, m_send_type3[2*grid], m_neighbor[1], xtag2,
		    m_cartesian_communicator, &status );
      // Y-direction communication
      MPI_Sendrecv( &u(1,ib,je-(2*m_ppadding-1),kb), 1, m_send_type3[2*grid+1], m_neighbor[3], ytag1,
		    &u(1,ib,jb,kb), 1, m_send_type3[2*grid+1], m_neighbor[2], ytag1,
		    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib,jb+m_ppadding,kb), 1, m_send_type3[2*grid+1], m_neighbor[2], ytag2,
		    &u(1,ib,je-(m_ppadding-1),kb), 1, m_send_type3[2*grid+1], m_neighbor[3], ytag2,
		    m_cartesian_communicator, &status );
   }
   else if( u.m_nc == 4 )
   {
      // 4-component array: same pattern with the m_send_type4 datatypes
      int xtag1 = 345;
      int xtag2 = 346;
      int ytag1 = 347;
      int ytag2 = 348;
      // X-direction communication
      MPI_Sendrecv( &u(1,ie-(2*m_ppadding-1),jb,kb), 1, m_send_type4[2*grid], m_neighbor[1], xtag1,
		    &u(1,ib,jb,kb), 1, m_send_type4[2*grid], m_neighbor[0], xtag1,
		    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib+m_ppadding,jb,kb), 1, m_send_type4[2*grid], m_neighbor[0], xtag2,
		    &u(1,ie-(m_ppadding-1),jb,kb), 1, m_send_type4[2*grid], m_neighbor[1], xtag2,
		    m_cartesian_communicator, &status );
      // Y-direction communication
      MPI_Sendrecv( &u(1,ib,je-(2*m_ppadding-1),kb), 1, m_send_type4[2*grid+1], m_neighbor[3], ytag1,
		    &u(1,ib,jb,kb), 1, m_send_type4[2*grid+1], m_neighbor[2], ytag1,
		    m_cartesian_communicator, &status );
      MPI_Sendrecv( &u(1,ib,jb+m_ppadding,kb), 1, m_send_type4[2*grid+1], m_neighbor[2], ytag2,
		    &u(1,ib,je-(m_ppadding-1),kb), 1, m_send_type4[2*grid+1], m_neighbor[3], ytag2,
		    m_cartesian_communicator, &status );
   }
}
//-----------------------------------------------------------------------
void EW::cartesian_bc_forcing( float_sw4 t, vector<float_sw4**> & a_BCForcing,
			       vector<Source*>& a_sources )
// assign the boundary forcing arrays a_BCForcing[g][side]
{
   for( int g = 0 ; g < mNumberOfGrids ; g++ )
   {
      for( int side = 0 ; side < 6 ; side++ )
      {
         if( m_point_source_test && m_bcType[g][side] == bDirichlet )
         {
            // point-source test: Dirichlet sides get the analytic solution
            get_exact_point_source( a_BCForcing[g][side], t, g, *a_sources[0], &m_BndryWindow[g][6*side] );
         }
         else
         {
            // homogeneous forcing everywhere else; the same loop is safe for
            // all bc types since bParallel sides have m_NumberOfBCPoints == 0
            for( int q = 0 ; q < 3*m_NumberOfBCPoints[g][side] ; q++ )
               a_BCForcing[g][side][q] = 0.;
         }
      }
   }
}
//-----------------------------------------------------------------------
void EW::setup_boundary_arrays( )
{
   // Allocate and fill m_BndryWindow (6 index windows of 6 ints per grid)
   // and m_NumberOfBCPoints (points per side per grid) for all sides that
   // take boundary forcing.  Sides with no forcing keep the sentinel empty
   // window (999/-999) and zero points.
   m_BndryWindow.resize(mNumberOfGrids);
   m_NumberOfBCPoints.resize(mNumberOfGrids);
   for (int g=0; g<mNumberOfGrids; g++ )
   {
      m_BndryWindow[g] = new int[36];
      m_NumberOfBCPoints[g] = new int[6];
      // initialize to an empty window: start > end guarantees zero-trip loops
      for(int side=0; side<6 ; side++ )
      {
	 m_NumberOfBCPoints[g][side] = 0;
	 for (int qq=0; qq<6; qq+=2) // 0, 2, 4
	    m_BndryWindow[g][qq + side*6]= 999;
	 for (int qq=1; qq<6; qq+=2) // 1, 3, 5
	    m_BndryWindow[g][qq + side*6]= -999;
      }
      int wind[6];
      for(int side=0; side<6 ; side++ )
      {
	 if (m_bcType[g][side] == bStressFree || m_bcType[g][side] == bDirichlet ||
	     m_bcType[g][side] == bSuperGrid || m_bcType[g][side] == bPeriodic)
	 {
	    // modify the window for stress free bc to only hold one plane
	    if (m_bcType[g][side] == bStressFree)
	    {
	       side_plane( g, side, wind, 1 );
	       // when calling side_plane with nGhost=1, you get the outermost grid plane
	       // for Free surface conditions, we apply the forcing on the boundary itself, i.e., just
	       // inside the ghost points
	       // add/subtract the ghost point offset
	       if( side == 0 )
	       {
		  wind[0] += m_ghost_points;   wind[1]  = wind[0];
	       }
	       else if( side == 1 )
	       {
		  wind[0] -= m_ghost_points;   wind[1]  = wind[0];
	       }
	       else if( side == 2 )
	       {
		  wind[2] += m_ghost_points;   wind[3]  = wind[2];
	       }
	       else if( side == 3 )
	       {
		  wind[2] -= m_ghost_points;   wind[3]  = wind[2];
	       }
	       else if( side == 4 )
	       {
		  wind[4] += m_ghost_points;
		  wind[5]  = wind[4];
	       }
	       else
	       {
		  wind[4] -= m_ghost_points;
		  wind[5]  = wind[4];
	       }
	    }
	    else // for Dirichlet, super grid, and periodic conditions, we
	         // apply the forcing directly on the ghost points
	    {
	       side_plane( g, side, wind, m_ghost_points );
	    }
	    // number of grid points in the (possibly collapsed) window
	    int npts = (wind[5]-wind[4]+1)*
	       (wind[3]-wind[2]+1)*
	       (wind[1]-wind[0]+1);
	    for (int qq=0; qq<6; qq++)
	       m_BndryWindow[g][qq+side*6]=wind[qq];
	    m_NumberOfBCPoints[g][side] = npts;
	 }
      }
   }
}
//-----------------------------------------------------------------------
void EW::side_plane( int g, int side, int wind[6], int nGhost )
{
   // Return in wind[] the index window of the nGhost outermost planes at
   // the given side (0/1 = low/high i, 2/3 = low/high j, 4/5 = low/high k)
   // of grid g.  Start from the full local index box, then collapse the
   // direction normal to the side down to nGhost planes.
   wind[0] = m_iStart[g];
   wind[1] = m_iEnd[g];
   wind[2] = m_jStart[g];
   wind[3] = m_jEnd[g];
   wind[4] = m_kStart[g];
   wind[5] = m_kEnd[g];
   switch( side )
   {
   case 0:  wind[1] = wind[0] + (nGhost-1); break;
   case 1:  wind[0] = wind[1] - (nGhost-1); break;
   case 2:  wind[3] = wind[2] + (nGhost-1); break;
   case 3:  wind[2] = wind[3] - (nGhost-1); break;
   case 4:  wind[5] = wind[4] + (nGhost-1); break;
   default: wind[4] = wind[5] - (nGhost-1); break;
   }
}
//-----------------------------------------------------------------------
void EW::enforceBC( vector<Sarray> & a_U, vector<Sarray>& a_Mu, vector<Sarray>& a_Lambda,
		    float_sw4 t, vector<float_sw4**> & a_BCForcing )
{
   // Apply boundary conditions on all six sides of every grid by filling
   // ghost points of a_U, using the forcing arrays a_BCForcing and the
   // bcfortsg kernels.  A curvilinear top grid with a stress-free top
   // surface additionally needs the freesurfcurvisg kernels with metric
   // terms.  The _indrev/_rev variants are for m_corder layout.
   // om/ph/cv are twilight-test parameters, unused (zero) here.
   float_sw4 om=0, ph=0, cv=0;
   for(int g=0 ; g<mNumberOfGrids; g++ )
   {
      // int topo=topographyExists() && g == mNumberOfGrids-1;
      if( m_corder )
	 bcfortsg_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			  m_BndryWindow[g], m_global_nx[g], m_global_ny[g], m_global_nz[g], a_U[g].c_ptr(),
			  mGridSize[g], m_bcType[g], m_sbop, a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(),
			  t, a_BCForcing[g][0], a_BCForcing[g][1], a_BCForcing[g][2],
			  a_BCForcing[g][3], a_BCForcing[g][4], a_BCForcing[g][5],
			  om, ph, cv, m_sg_str_x[g], m_sg_str_y[g] );
      else
	 bcfortsg( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
		   m_BndryWindow[g], m_global_nx[g], m_global_ny[g], m_global_nz[g], a_U[g].c_ptr(),
		   mGridSize[g], m_bcType[g], m_sbop, a_Mu[g].c_ptr(), a_Lambda[g].c_ptr(),
		   t, a_BCForcing[g][0], a_BCForcing[g][1], a_BCForcing[g][2],
		   a_BCForcing[g][3], a_BCForcing[g][4], a_BCForcing[g][5],
		   om, ph, cv, m_sg_str_x[g], m_sg_str_y[g] );
      // curvilinear top grid with a free surface at the top (side index 4):
      // impose the free-surface condition with the metric-aware kernel
      if( m_topography_exists && g == mNumberOfGrids-1 && m_bcType[g][4] == bStressFree )
      {
	 int side = 5;
	 if( m_corder )
	    freesurfcurvisg_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
				 m_global_nz[g], side, a_U[g].c_ptr(), a_Mu[g].c_ptr(),
				 a_Lambda[g].c_ptr(), mMetric.c_ptr(), m_sbop,
				 a_BCForcing[g][4], m_sg_str_x[g], m_sg_str_y[g] );
	 else
	    freesurfcurvisg( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			     m_global_nz[g], side, a_U[g].c_ptr(), a_Mu[g].c_ptr(),
			     a_Lambda[g].c_ptr(), mMetric.c_ptr(), m_sbop,
			     a_BCForcing[g][4], m_sg_str_x[g], m_sg_str_y[g] );
      }
   }
   // finally couple the curvilinear grid to the top Cartesian grid
   enforceCartTopo( a_U );
}
//-----------------------------------------------------------------------
void EW::enforceCartTopo( vector<Sarray>& a_U )
{
   // interface between curvilinear and top Cartesian grid
   // Grid g (top Cartesian) and grid gc (curvilinear) share an interface:
   // fill each grid's ghost points by copying solution values from the
   // interior of the other grid.  No-op without topography.
   if (m_topography_exists)
   {
      int nc = 3;                          // three solution components
      int g = mNumberOfCartesianGrids-1;   // topmost Cartesian grid
      int gc = mNumberOfGrids-1;           // curvilinear grid above it
      int q, i, j;
      // inject solution values between lower boundary of gc and upper boundary of g
      for( j = m_jStart[g] ; j <= m_jEnd[g]; j++ )
	 for( i = m_iStart[g]; i <= m_iEnd[g]; i++ )
	 {
	    // assign ghost points in the Cartesian grid
	    for (q = 0; q < m_ghost_points; q++) // only once when m_ghost_points==1
	    {
	       for( int c = 1; c <= nc ; c++ )
		  a_U[g](c,i,j,m_kStart[g] + q) = a_U[gc](c,i,j,m_kEnd[gc]-2*m_ghost_points + q);
	    }
	    // assign ghost points in the Curvilinear grid
	    for (q = 0; q <= m_ghost_points; q++) // twice when m_ghost_points==1 (overwrites solution on the common grid line)
	    {
	       for( int c = 1; c <= nc ; c++ )
		  a_U[gc](c,i,j,m_kEnd[gc]-q) = a_U[g](c,i,j,m_kStart[g]+2*m_ghost_points - q);
	    }
	 }
   }
}
//-----------------------------------------------------------------------
void EW::addSuperGridDamping(vector<Sarray> & a_Up, vector<Sarray> & a_U,
			     vector<Sarray> & a_Um, vector<Sarray> & a_Rho )
{
   // Add super-grid (absorbing layer) damping terms to a_Up on every grid.
   // Kernel selection: addsgd{4,6} by m_sg_damping_order, the 'c' variants
   // for the curvilinear top grid (they need the Jacobian mJ), and the
   // _indrev variants for m_corder (component-fastest) layout.
   for(int g=0 ; g<mNumberOfCartesianGrids; g++ )
   {
      if( m_sg_damping_order == 4 )
      {
	 if( m_corder )
	    addsgd4fort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
				a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
				m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g], m_sg_str_x[g],
				m_sg_str_y[g], m_sg_str_z[g], m_sg_corner_x[g], m_sg_corner_y[g],
				m_sg_corner_z[g], m_supergrid_damping_coefficient );
	 else
	    addsgd4fort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
			 m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g], m_sg_str_x[g],
			 m_sg_str_y[g], m_sg_str_z[g], m_sg_corner_x[g], m_sg_corner_y[g],
			 m_sg_corner_z[g], m_supergrid_damping_coefficient );
      }
      else if(  m_sg_damping_order == 6 )
      {
	 if( m_corder )
	    addsgd6fort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
				a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
				m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g], m_sg_str_x[g],
				m_sg_str_y[g], m_sg_str_z[g], m_sg_corner_x[g], m_sg_corner_y[g],
				m_sg_corner_z[g], m_supergrid_damping_coefficient );
	 else
	    addsgd6fort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
			 m_sg_dc_x[g], m_sg_dc_y[g], m_sg_dc_z[g], m_sg_str_x[g],
			 m_sg_str_y[g], m_sg_str_z[g], m_sg_corner_x[g], m_sg_corner_y[g],
			 m_sg_corner_z[g], m_supergrid_damping_coefficient );
      }
   }
   if( m_topography_exists )
   {
      // curvilinear top grid: damping in x and y only, weighted by mJ
      int g=mNumberOfGrids-1;
      if( m_sg_damping_order == 4 )
      {
	 if( m_corder )
	    addsgd4cfort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
				 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
				 m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
				 mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
				 m_supergrid_damping_coefficient );
	 else
	    addsgd4cfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			  a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
			  m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
			  mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g], m_supergrid_damping_coefficient );
      }
      else if(  m_sg_damping_order == 6 )
      {
	 if( m_corder )
	    addsgd6cfort_indrev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
				 a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
				 m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
				 mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
				 m_supergrid_damping_coefficient );
	 else
	    addsgd6cfort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			  a_Up[g].c_ptr(), a_U[g].c_ptr(), a_Um[g].c_ptr(), a_Rho[g].c_ptr(),
			  m_sg_dc_x[g], m_sg_dc_y[g], m_sg_str_x[g], m_sg_str_y[g],
			  mJ.c_ptr(), m_sg_corner_x[g], m_sg_corner_y[g],
			  m_supergrid_damping_coefficient );
      }
   }
}
//-----------------------------------------------------------------------
void EW::printTime( int cycle, float_sw4 t, bool force ) const
{
   // Progress line on rank 0, printed every mPrintInterval steps
   // (or always when force is set), unless running in quiet mode.
   if( mQuiet || m_myrank != 0 )
      return;
   bool due = force || mPrintInterval == 1 ||
              (cycle % mPrintInterval) == 1 || cycle == 1;
   if( due )
      cout << "Time step " << cycle << " t= " << t << endl;
}
//-----------------------------------------------------------------------
bool EW::exactSol( float_sw4 a_t, vector<Sarray> & a_U, vector<Source*>& sources )
{
   // For the point-source test, overwrite a_U on every grid with the
   // analytic point-source solution at time a_t, evaluated from sources[0].
   // Returns true iff a_U was modified (i.e., m_point_source_test is set).
   if( !m_point_source_test )
      return false;
   for( int g=0 ; g < mNumberOfGrids; g++ )
   {
      size_t npts = static_cast<size_t>(m_iEnd[g]-m_iStart[g]+1)*(m_jEnd[g]-m_jStart[g]+1)*(m_kEnd[g]-m_kStart[g]+1);
      // RAII buffer instead of raw new[]/delete[]: the original leaked
      // utmp if get_exact_point_source or assign threw.
      vector<float_sw4> utmp( npts*3 );
      // get_exact_point_source( a_U[g].c_ptr(), a_t, g, *sources[0] );
      get_exact_point_source( &utmp[0], a_t, g, *sources[0] );
      a_U[g].assign( &utmp[0], 0 );
   }
   return true;
}
//-----------------------------------------------------------------------
// smooth wave for time dependence to test point force term with
float_sw4 EW::SmoothWave(float_sw4 t, float_sw4 R, float_sw4 c)
{
   // Polynomial pulse used as time dependence for point-force tests.
   // Non-zero only on the retarded-time interval 0 < t - R/c < 1.
   // Fix: removed the dead initialization 'temp = R' (always overwritten)
   // and hoisted the repeated subexpression t - R/c.
   float_sw4 c0 = 2187./8., c1 = -10935./8., c2 = 19683./8., c3 = -15309./8., c4 = 2187./4.;
   const float_sw4 tau = t - R/c;   // retarded time
   float_sw4 temp = 0;
   if( tau > 0 && tau < 1 )
      temp = (c0*pow(tau,3)+c1*pow(tau,4)+c2*pow(tau,5)+c3*pow(tau,6)+c4*pow(tau,7));
   return temp;
}
//-----------------------------------------------------------------------
// very smooth bump for time dependence for further testing of point force
float_sw4 EW::VerySmoothBump(float_sw4 t, float_sw4 R, float_sw4 c)
{
   // Very smooth polynomial bump used as time dependence for point-force
   // tests; non-zero only on the retarded-time interval 0 < t - R/c < 1.
   // Fix: removed the dead initialization 'temp = R' (always overwritten)
   // and hoisted the repeated subexpression t - R/c.
   float_sw4 c0 = 1024, c1 = -5120, c2 = 10240, c3 = -10240, c4 = 5120, c5 = -1024;
   const float_sw4 tau = t - R/c;   // retarded time
   float_sw4 temp = 0;
   if( tau > 0 && tau < 1 )
      temp = (c0*pow(tau,5)+c1*pow(tau,6)+c2*pow(tau,7)+c3*pow(tau,8)+c4*pow(tau,9)+c5*pow(tau,10));
   return temp;
}
//-----------------------------------------------------------------------
// C6 smooth bump for time dependence for further testing of point force
float_sw4 EW::C6SmoothBump(float_sw4 t, float_sw4 R, float_sw4 c)
{
   // C6-smooth bump used as time dependence for point-force tests;
   // non-zero only on the retarded-time interval 0 < t - R/c < 1.
   const float_sw4 tau = t - R/c;
   if( tau > 0 && tau < 1 )
      return 51480.0*pow( tau*(1-tau), 7 );
   return 0;
}
//-----------------------------------------------------------------------
// derivative of smooth wave
float_sw4 EW::d_SmoothWave_dt(float_sw4 t, float_sw4 R, float_sw4 c)
{
   // Time derivative of SmoothWave; non-zero only for 0 < t - R/c < 1.
   // Fix: removed the dead initialization 'temp = R' (always overwritten)
   // and hoisted the repeated subexpression t - R/c.
   float_sw4 c0 = 2187./8., c1 = -10935./8., c2 = 19683./8., c3 = -15309./8., c4 = 2187./4.;
   const float_sw4 tau = t - R/c;   // retarded time
   float_sw4 temp = 0;
   if( tau > 0 && tau < 1 )
      temp = (3*c0*pow(tau,2)+4*c1*pow(tau,3)+5*c2*pow(tau,4)+6*c3*pow(tau,5)+7*c4*pow(tau,6));
   return temp;
}
//-----------------------------------------------------------------------
// very smooth bump for time dependence to further testing of point force
float_sw4 EW::d_VerySmoothBump_dt(float_sw4 t, float_sw4 R, float_sw4 c)
{
   // Time derivative of VerySmoothBump; non-zero only for 0 < t - R/c < 1.
   // Fix: removed the dead initialization 'temp = R' (always overwritten)
   // and hoisted the repeated subexpression t - R/c.
   float_sw4 c0 = 1024, c1 = -5120, c2 = 10240, c3 = -10240, c4 = 5120, c5 = -1024;
   const float_sw4 tau = t - R/c;   // retarded time
   float_sw4 temp = 0;
   if( tau > 0 && tau < 1 )
      temp = (5*c0*pow(tau,4)+6*c1*pow(tau,5)+7*c2*pow(tau,6)+8*c3*pow(tau,7)+9*c4*pow(tau,8))+10*c5*pow(tau,9);
   return temp;
}
//-----------------------------------------------------------------------
// C6 smooth bump for time dependence to further testing of point force
float_sw4 EW::d_C6SmoothBump_dt(float_sw4 t, float_sw4 R, float_sw4 c)
{
   // Time derivative of C6SmoothBump; non-zero only for 0 < t - R/c < 1.
   const float_sw4 tau = t - R/c;
   if( tau > 0 && tau < 1 )
      return 51480.0*7*(1-2*tau)*pow(tau*(1-tau),6);
   return 0;
}
//-----------------------------------------------------------------------
// Primitive function (for T) of SmoothWave(t-T)*T
float_sw4 EW::SWTP(float_sw4 Lim, float_sw4 t)
{
   // Primitive function (in T) of SmoothWave(t-T)*T, evaluated at T = Lim;
   // used by SmoothWave_x_T_Integral.  The polynomial expression below is
   // machine generated and left untouched.
   // Fix: removed the dead initialization 'temp = Lim' (always overwritten).
   float_sw4 c0 = 2187./8., c1 = -10935./8., c2 = 19683./8., c3 = -15309./8., c4 = 2187./4.;
   float_sw4 temp = (pow(t,3)*(c0 + c1*t + c2*pow(t,2) + c3*pow(t,3) + c4*pow(t,4))*pow(Lim,2))/2. -
     (pow(t,2)*(3*c0 + 4*c1*t + 5*c2*pow(t,2) + 6*c3*pow(t,3) + 7*c4*pow(t,4))*pow(Lim,3))/3. +
     (t*(3*c0 + 6*c1*t + 10*c2*pow(t,2) + 15*c3*pow(t,3) + 21*c4*pow(t,4))*pow(Lim,4))/4. +
     ((-c0 - 4*c1*t - 10*c2*pow(t,2) - 20*c3*pow(t,3) - 35*c4*pow(t,4))*pow(Lim,5))/5. +
     ((c1 + 5*c2*t + 15*c3*pow(t,2) + 35*c4*pow(t,3))*pow(Lim,6))/6. +
     ((-c2 - 6*c3*t - 21*c4*pow(t,2))*pow(Lim,7))/7. + ((c3 + 7*c4*t)*pow(Lim,8))/8. - (c4*pow(Lim,9))/9.;
   return temp;
}
//-----------------------------------------------------------------------
// Primitive function (for T) of VerySmoothBump(t-T)*T
float_sw4 EW::VSBTP(float_sw4 Lim, float_sw4 t)
{
   // Primitive function (in T) of VerySmoothBump(t-T)*T, evaluated at
   // T = Lim; used by VerySmoothBump_x_T_Integral.  The expression is
   // machine generated; note that f..k shadow single-letter names and hold
   // the VerySmoothBump polynomial coefficients (same values as c0..c5
   // there).  The initial 'temp = Lim' is a dead store kept for symmetry
   // with the sibling primitives.
   float_sw4 temp = Lim;
   float_sw4 f = 1024., g = -5120., h = 10240., i = -10240., j = 5120., k = -1024.;
   temp = (pow(Lim,11)*(-25200*k*t-2520*j)+2310*k*pow(Lim,12)+(124740*k*pow(t,2)
	   +24948*j*t+2772*i)*pow(Lim,10)+(-369600*k*pow(t,3)-110880*j*pow(t,2)-24640*i*t-3080*h)*pow(Lim,9)+(727650*k*pow(t,4)+291060*j*pow(t,3)+97020*i*pow(t,2)+24255*h*t+3465*g)*pow(Lim,8)+(-997920*k*pow(t,5)-498960*j*pow(t,4)-221760*i*pow(t,3)-83160*h*pow(t,2)-23760*g*t-3960*f)*pow(Lim,7)+(970200*k*pow(t,6)+582120*j*pow(t,5)+323400*i*pow(t,4)+161700*h*pow(t,3)+69300*g*pow(t,2)+23100*f*t)*pow(Lim,6)+(-665280*k*pow(t,7)-465696*j*pow(t,6)-310464*i*pow(t,5)-194040*h*pow(t,4)-110880*g*pow(t,3)-55440*f*pow(t,2))*pow(Lim,5)+
	   (311850*k*pow(t,8)+249480*j*pow(t,7)+194040*i*pow(t,6)+145530*h*pow(t,5)+103950*g*pow(t,4)+69300*f*pow(t,3))*pow(Lim,4)+(-92400*
	   k*pow(t,9)-83160*j*pow(t,8)-73920*i*pow(t,7)-64680*h*pow(t,6)-55440*g*pow(t,5)-46200*f*pow(t,4))*pow(Lim,3)+(13860*k*pow(t,10)+13860*j*pow(t,9)+13860*i*pow(t,8)+13860*h*pow(t,7)+13860*g*pow(t,6)+13860*f*pow(t,5))*pow(Lim,2))/27720.0;
   return temp;
}
//-----------------------------------------------------------------------
// Primitive function (for T) of C6SmoothBump(t-T)*T
float_sw4 EW::C6SBTP(float_sw4 Lim, float_sw4 t)
{
   // Primitive function (in T) of C6SmoothBump(t-T)*T, evaluated at
   // T = Lim; used by C6SmoothBump_x_T_Integral.  The polynomial in
   // x = t - Lim is machine generated.
   float_sw4 x = t-Lim;
   return pow(x,8)*(-3217.5*pow(x,8)+3432.0*(7+t)*pow(x,7)-25740.0*(3+t)*pow(x,6)
		    +27720.0*(5+3*t)*pow(x,5)-150150.0*(t+1)*x*x*x*x +
		    32760.0*(3+5*t)*x*x*x-36036.0*(1+3*t)*x*x+5720.0*(1+7*t)*x-6435.0*t);
}
//-----------------------------------------------------------------------
// Integral of H(t-T)*H(1-t+T)*SmoothWave(t-T)*T from R/alpha to R/beta
float_sw4 EW::SmoothWave_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 alpha, float_sw4 beta)
{
   // Integral of H(t-T)*H(1-t+T)*SmoothWave(t-T)*T dT from T=R/alpha to
   // T=R/beta.  The Heaviside factors clip the integration interval to
   // [max(R/alpha, t-1), min(R/beta, t)]; outside that the integrand is 0.
   // Fix: removed the dead initialization 'temp = R' (always overwritten).
   float_sw4 lowL, hiL;
   if( R / alpha > t - 1 )
      lowL = R/alpha;
   else
      lowL = t-1;
   if( R / beta < t )
      hiL = R/beta;
   else
      hiL = t;
   // non-empty overlap with the pulse support => evaluate the primitive
   if( lowL < t && hiL > t - 1 )
      return SWTP(hiL, t) - SWTP(lowL, t);
   return 0;
}
//-----------------------------------------------------------------------
// Integral of H(t-T)*H(1-t+T)*VerySmoothBump(t-T)*T from R/alpha to R/beta
float_sw4 EW::VerySmoothBump_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 alpha, float_sw4 beta)
{
   // Integral of H(t-T)*H(1-t+T)*VerySmoothBump(t-T)*T dT from T=R/alpha
   // to T=R/beta.  The Heaviside factors clip the integration interval to
   // [max(R/alpha, t-1), min(R/beta, t)]; outside that the integrand is 0.
   // Fix: removed the dead initialization 'temp = R' (always overwritten).
   float_sw4 lowL, hiL;
   if( R / alpha > t - 1 )
      lowL = R/alpha;
   else
      lowL = t-1;
   if( R / beta < t )
      hiL = R/beta;
   else
      hiL = t;
   // non-empty overlap with the pulse support => evaluate the primitive
   if( lowL < t && hiL > t - 1 )
      return VSBTP(hiL, t) - VSBTP(lowL, t);
   return 0;
}
//-----------------------------------------------------------------------
// Integral of H(t-T)*H(1-t+T)*C6SmoothBump(t-T)*T from R/alpha to R/beta
float_sw4 EW::C6SmoothBump_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 alpha, float_sw4 beta)
{
   // Integral of H(t-T)*H(1-t+T)*C6SmoothBump(t-T)*T dT from T=R/alpha to
   // T=R/beta.  The Heaviside factors clip the integration interval to
   // [max(R/alpha, t-1), min(R/beta, t)]; outside that the integrand is 0.
   // Fixes: removed the dead initialization 'temp = R' and the stale
   // copy-pasted comment that referred to VSBTP (this routine uses C6SBTP).
   float_sw4 lowL, hiL;
   if( R / alpha > t - 1 )
      lowL = R/alpha;
   else
      lowL = t-1;
   if( R / beta < t )
      hiL = R/beta;
   else
      hiL = t;
   // non-empty overlap with the pulse support => evaluate the primitive
   if( lowL < t && hiL > t - 1 )
      return C6SBTP(hiL, t) - C6SBTP(lowL, t);
   return 0;
}
//-----------------------------------------------------------------------
float_sw4 EW::Gaussian(float_sw4 t, float_sw4 R, float_sw4 c, float_sw4 f )
{
   // Normalized Gaussian pulse in the retarded time t - R/c with
   // standard deviation f.
   // Fix: removed the dead initialization 'temp = R' (always overwritten).
   return 1 /(f* sqrt(2*M_PI))*exp(-pow(t-R/c,2) / (2*f*f));
}
//-----------------------------------------------------------------------
float_sw4 EW::d_Gaussian_dt(float_sw4 t, float_sw4 R, float_sw4 c, float_sw4 f)
{
   // Time derivative of the normalized Gaussian pulse in retarded time
   // t - R/c with standard deviation f.
   // Fix: removed the dead initialization 'temp = R' (always overwritten).
   return 1 /(f* sqrt(2*M_PI))*(-exp(-pow(t-R/c,2)/(2*f*f))*(t-R/c))/pow(f,2);
}
//-----------------------------------------------------------------------
float_sw4 EW::Gaussian_x_T_Integral(float_sw4 t, float_sw4 R, float_sw4 f, float_sw4 alpha, float_sw4 beta)
{
   // Closed-form integral of Gaussian(t-T)*T over T between R/alpha and R/beta,
   // expressed via the error function erf.
   // (Removed dead store: 'temp' was initialized to R and immediately overwritten.)
   return -0.5*t*(erf( (t-R/beta)/(sqrt(2.0)*f)) - erf( (t-R/alpha)/(sqrt(2.0)*f)) ) -
      f/sqrt(2*M_PI)*( exp(-pow(t-R/beta,2)/(2*f*f) ) - exp( -pow(t-R/alpha,2)/(2*f*f) ) ) ;
}
//-----------------------------------------------------------------------
// Evaluate the analytic free-space solution for a point source in a homogeneous
// elastic medium and store it in 'up' (3 components per grid point, interleaved).
// Supports a point force or a moment-tensor source with time dependency
// SmoothWave, VerySmoothBump, C6SmoothBump, or Gaussian.
//   up     - output array; 3 entries per point, written at index 3*ind, ind
//            running over the (wind or full) index window in i-fastest order
//   t      - time at which the solution is evaluated
//   g      - grid number
//   source - the point source (location, forces or moments, time function)
//   wind   - optional window [imin,imax,jmin,jmax,kmin,kmax]; 0 => full local grid
// NOTE(review): the material is assumed constant; it is sampled at the middle
// of the local domain below — confirm this holds for the intended test cases.
void EW::get_exact_point_source( float_sw4* up, float_sw4 t, int g, Source& source,
				 int* wind )
{
   timeDep tD;
   // Only these four time dependencies have an analytic solution implemented.
   if(!( source.getName() == "SmoothWave" || source.getName() == "VerySmoothBump" ||
         source.getName() == "C6SmoothBump" || source.getName()== "Gaussian") )
   {
      cout << "EW::get_exact_point_source: Error, time dependency must be SmoothWave, VerySmoothBump, C6SmoothBump, or Gaussian, not "
	   << source.getName() << endl;
      return;
   }
   else if( source.getName() == "SmoothWave" )
      tD = iSmoothWave;
   else if( source.getName() == "VerySmoothBump" )
      tD = iVerySmoothBump;
   else if( source.getName() == "C6SmoothBump" )
      tD = iC6SmoothBump;
   else
      tD = iGaussian;
   //   u.set_to_zero();
   // Assume constant material, sample it in middle of domain
   int imid = (m_iStart[g]+m_iEnd[g])/2;
   int jmid = (m_jStart[g]+m_jEnd[g])/2;
   int kmid = (m_kStart[g]+m_kEnd[g])/2;
   // rho, shear speed (beta) and compressional speed (alpha) at the sample point.
   float_sw4 rho   = mRho[g](imid,jmid,kmid);
   float_sw4 beta  = sqrt( mMu[g](imid,jmid,kmid)/rho);
   float_sw4 alpha = sqrt( (2*mMu[g](imid,jmid,kmid)+mLambda[g](imid,jmid,kmid))/rho);
   float_sw4 x0 = source.getX0();
   float_sw4 y0 = source.getY0();
   float_sw4 z0 = source.getZ0();
   float_sw4 fr=source.getFrequency();
   float_sw4 time = (t-source.getOffset()) * source.getFrequency();
   // For the Gaussian the frequency parameter enters as its inverse (spread);
   // rescale time accordingly — presumably fr is sigma after this block; TODO confirm.
   if( tD == iGaussian )
   {
      fr = 1/fr;
      time = time*fr;
   }
   bool ismomentsource = source.isMomentSource();
   float_sw4 fx, fy, fz;
   float_sw4 mxx, myy, mzz, mxy, mxz, myz, m0;
   if( !ismomentsource )
   {
      source.getForces( fx, fy, fz );
   }
   else
   {
      source.getMoments( mxx, mxy, mxz, myy, myz, mzz );
      //      m0 = source.getAmplitude();
      m0 = 1;
   }
   float_sw4 h = mGridSize[g];
   // Points closer than eps to the source are set to zero (solution is singular there).
   float_sw4 eps = 1e-3*h;
   size_t ind = 0;
   int imax, imin, jmax, jmin, kmax, kmin;
   if( wind == 0 )
   {
      imin = m_iStart[g];
      imax = m_iEnd[g];
      jmin = m_jStart[g];
      jmax = m_jEnd[g];
      kmin = m_kStart[g];
      kmax = m_kEnd[g];
   }
   else
   {
      imin = wind[0];
      imax = wind[1];
      jmin = wind[2];
      jmax = wind[3];
      kmin = wind[4];
      kmax = wind[5];
   }
   // Note: Use of ind, assumes loop is over the domain over which u is defined.
   //   for( int k=m_kStart[g] ; k <= m_kEnd[g] ; k++ )
   //      for( int j=m_jStart[g] ; j <= m_jEnd[g] ; j++ )
   //	 for( int i=m_iStart[g] ; i <= m_iEnd[g] ; i++ )
   for( int k=kmin ; k <= kmax ; k++ )
      for( int j=jmin ; j <= jmax ; j++ )
         for( int i=imin ; i <= imax ; i++ )
         {
            // Cartesian coordinates of grid point (i,j,k); 1-based index convention.
            float_sw4 x,y,z;
            x = (i-1)*h;
            y = (j-1)*h;
            z = (k-1)*h + m_zmin[g];
            if( !ismomentsource )
            {
               // Point-force source: u = (r r^T f) A + f B, with A and B the
               // far/near-field coefficients of the elastic Green's function.
               float_sw4 R = sqrt( (x - x0)*(x - x0) + (y - y0)*(y - y0) + (z - z0)*(z - z0) );
               if( R < eps )
                  up[3*ind] = up[3*ind+1] = up[3*ind+2] = 0;
               else
               {
                  float_sw4 A, B;
                  if (tD == iSmoothWave)
                  {
                     A = ( 1/pow(alpha,2) * SmoothWave(time, fr*R, alpha) - 1/pow(beta,2) * SmoothWave(time, fr*R, beta) +
                           3/pow(fr*R,2) * SmoothWave_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * SmoothWave(time, fr*R, beta) -
                           1/pow(fr*R,2) * SmoothWave_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  else if (tD == iVerySmoothBump)
                  {
                     A = ( 1/pow(alpha,2) * VerySmoothBump(time, fr*R, alpha) - 1/pow(beta,2) * VerySmoothBump(time, fr*R, beta) +
                           3/pow(fr*R,2) * VerySmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * VerySmoothBump(time, fr*R, beta) -
                           1/pow(fr*R,2) * VerySmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  else if (tD == iC6SmoothBump)
                  {
                     A = ( 1/pow(alpha,2) * C6SmoothBump(time, fr*R, alpha) - 1/pow(beta,2) * C6SmoothBump(time, fr*R, beta) +
                           3/pow(fr*R,2) * C6SmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * C6SmoothBump(time, fr*R, beta) -
                           1/pow(fr*R,2) * C6SmoothBump_x_T_Integral(time, fr*R, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  else if( tD == iGaussian )
                  {
                     A = ( 1/pow(alpha,2) * Gaussian(time, R, alpha,fr) - 1/pow(beta,2) * Gaussian(time, R, beta,fr) +
                           3/pow(R,2) * Gaussian_x_T_Integral(time, R, fr, alpha, beta) ) / (4*M_PI*rho*R*R*R) ;
                     B = ( 1/pow(beta,2) * Gaussian(time, R, beta,fr) -
                           1/pow(R,2) * Gaussian_x_T_Integral(time, R, fr, alpha, beta) ) / (4*M_PI*rho*R) ;
                  }
                  up[3*ind]   = ( (x - x0)*(x - x0)*fx + (x - x0)*(y - y0)*fy + (x - x0)*(z - z0)*fz )*A + fx*B;
                  up[3*ind+1] = ( (y - y0)*(x - x0)*fx + (y - y0)*(y - y0)*fy + (y - y0)*(z - z0)*fz )*A + fy*B;
                  up[3*ind+2] = ( (z - z0)*(x - x0)*fx + (z - z0)*(y - y0)*fy + (z - z0)*(z - z0)*fz )*A + fz*B;
               }
            }
            else
            {
               up[3*ind] = up[3*ind+1] = up[3*ind+2] = 0;
               // Here, ismomentsource == true
               float_sw4 R = sqrt( (x - x0)*(x - x0) + (y - y0)*(y - y0) + (z - z0)*(z - z0) );
               if( R < eps )
               {
                  up[3*ind] = up[3*ind+1] = up[3*ind+2] = 0;
               }
               else
               {
                  // A,B: time-function values at P and S speeds; C: integral term;
                  // D,E: time-derivative (far-field) terms at P and S speeds.
                  float_sw4 A, B, C, D, E;
                  if (tD == iSmoothWave)
                  {
                     A = SmoothWave(time, R, alpha);
                     B = SmoothWave(time, R, beta);
                     C = SmoothWave_x_T_Integral(time, R, alpha, beta);
                     D = d_SmoothWave_dt(time, R, alpha) / pow(alpha,3) / R;
                     E = d_SmoothWave_dt(time, R, beta) / pow(beta,3) / R;
                  }
                  else if (tD == iVerySmoothBump)
                  {
                     A = VerySmoothBump(time, R, alpha);
                     B = VerySmoothBump(time, R, beta);
                     C = VerySmoothBump_x_T_Integral(time, R, alpha, beta);
                     D = d_VerySmoothBump_dt(time, R, alpha) / pow(alpha,3) / R;
                     E = d_VerySmoothBump_dt(time, R, beta) / pow(beta,3) / R;
                  }
                  else if (tD == iC6SmoothBump)
                  {
                     A = C6SmoothBump(time, R, alpha);
                     B = C6SmoothBump(time, R, beta);
                     C = C6SmoothBump_x_T_Integral(time, R, alpha, beta);
                     D = d_C6SmoothBump_dt(time, R, alpha) / pow(alpha,3) / R;
                     E = d_C6SmoothBump_dt(time, R, beta) / pow(beta,3) / R;
                  }
                  else if (tD == iGaussian)
                  {
                     A = Gaussian(time, R, alpha,fr);
                     B = Gaussian(time, R, beta,fr);
                     C = Gaussian_x_T_Integral(time, R, fr,alpha, beta);
                     D = d_Gaussian_dt(time, R, alpha,fr) / pow(alpha,3) / R;
                     E = d_Gaussian_dt(time, R, beta,fr) / pow(beta,3) / R;
                  }
                  // Accumulate the nine moment-tensor contributions m_pq * dG_ip/dq
                  // (spatial derivatives of the Green's tensor), one term per statement.
                  up[3*ind] +=
                     // m_xx*G_xx,x
                     + m0*mxx/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(x-x0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - 2*(x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(x-x0)*(x-x0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(x-x0)*(x-x0) / pow(R,7) - 6*(x-x0) / pow(R,5) ) * C
                        + (x-x0)*(x-x0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        - 1 / pow(R,3) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        - 3*(x-x0) / pow(R,5) * C
                        + (x-x0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (x-x0)*E
                     );
                  up[3*ind] +=
                     // m_yy*G_xy,y
                     + m0*myy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        + 3*(x-x0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(y-y0)*(y-y0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C
                     );
                  up[3*ind] +=
                     // m_zz*G_xz,z
                     + m0*mzz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(z-z0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        + 3*(x-x0)*(z-z0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(z-z0)*(z-z0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C
                     );
                  up[3*ind] +=
                     // m_xy*G_xy,x
                     + m0*mxy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(y-y0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        + 3*(x-x0)*(y-y0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(x-x0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C
                     );
                  up[3*ind] +=
                     // m_xy*G_xx,y
                     + m0*mxy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(x-x0)*(x-x0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + 15*(x-x0)*(x-x0)*(y-y0) / pow(R,7) * C
                        + (x-x0)*(x-x0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        - 1 / pow(R,3) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        - 3*(y-y0) / pow(R,5) * C
                        + (y-y0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (y-y0)*E
                     );
                  up[3*ind] +=
                     // m_xz*G_xz,x
                     + m0*mxz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        + 3*(x-x0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(x-x0)*(z-z0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C
                     );
                  up[3*ind] +=
                     // m_yz*G_xz,y
                     + m0*myz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        + 3*(x-x0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C
                     );
                  up[3*ind] +=
                     // m_xz*G_xx,z
                     + m0*mxz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(x-x0)*(x-x0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + 15*(x-x0)*(x-x0)*(z-z0) / pow(R,7) * C
                        + (x-x0)*(x-x0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        - 1 / pow(R,3) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        - 3*(z-z0) / pow(R,5) * C
                        + (z-z0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (z-z0)*E
                     );
                  up[3*ind] +=
                     // m_yz*G_yx,z
                     + m0*myz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        + 3*(x-x0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C
                     );
                  //------------------------------------------------------------
                  up[3*ind+1] +=
                     // m_xx*G_xy,x
                     m0*mxx/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(y-y0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        + 3*(x-x0)*(y-y0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(x-x0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C
                     );
                  up[3*ind+1] +=
                     // m_yy**G_yy,y
                     + m0*myy/(4*M_PI*rho)*
                     (
                        + 3*(y-y0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - 2*(y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(y-y0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + ( 15*(y-y0)*(y-y0)*(y-y0) / pow(R,7) - 6*(y-y0) / pow(R,5) ) * C
                        + (y-y0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        - 1 / pow(R,3) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        - 3*(y-y0) / pow(R,5) * C
                        + (y-y0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (y-y0)*E
                     );
                  up[3*ind+1] +=
                     // m_zz*G_zy,z
                     + m0*mzz/(4*M_PI*rho)*
                     (
                        + 3*(z-z0)*(z-z0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (z-z0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        + 3*(z-z0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + ( 15*(z-z0)*(z-z0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C
                     );
                  up[3*ind+1] +=
                     // m_xy*G_yy,x
                     + m0*mxy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(y-y0)*(y-y0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(y-y0) / pow(R,7) * C
                        + (y-y0)*(y-y0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        - 1 / pow(R,3) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        - 3*(x-x0) / pow(R,5) * C
                        + (x-x0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (x-x0)*E
                     );
                  up[3*ind+1] +=
                     // m_xz*G_zy,x
                     + m0*mxz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (y-y0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        + 3*(y-y0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C
                     );
                  up[3*ind+1] +=
                     // m_xy*G_xy,y
                     + m0*mxy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        + 3*(x-x0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(y-y0)*(y-y0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C
                     );
                  up[3*ind+1] +=
                     // m_yz*G_zy,y
                     + m0*myz/(4*M_PI*rho)*
                     (
                        + 3*(z-z0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (z-z0)*(y-y0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        + 3*(z-z0)*(y-y0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + ( 15*(z-z0)*(y-y0)*(y-y0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C
                     );
                  up[3*ind+1] +=
                     // m_xz*G_xy,z
                     + m0*mxz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        + 3*(x-x0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C
                     );
                  up[3*ind+1] +=
                     // m_yz*G_yy,z
                     + m0*myz/(4*M_PI*rho)*
                     (
                        + 3*(z-z0)*(y-y0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(y-y0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + 15*(z-z0)*(y-y0)*(y-y0) / pow(R,7) * C
                        + (y-y0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        - 1 / pow(R,3) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        - 3*(z-z0) / pow(R,5) * C
                        + (z-z0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (z-z0)*E
                     );
                  //------------------------------------------------------------
                  up[3*ind+2] +=
                     // m_xx*G_zx,x
                     + m0*mxx/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(x-x0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        + 3*(x-x0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(x-x0)*(z-z0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C
                     );
                  up[3*ind+2] +=
                     // m_yy*G_zy,y
                     + m0*myy/(4*M_PI*rho)*
                     (
                        + 3*(y-y0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (y-y0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        + 3*(y-y0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + ( 15*(y-y0)*(y-y0)*(z-z0) / pow(R,7) - 3*(z-z0) / pow(R,5) ) * C
                     );
                  up[3*ind+2] +=
                     // m_zz**G_zz,z
                     + m0*mzz/(4*M_PI*rho)*
                     (
                        + 3*(z-z0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - 2*(z-z0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(z-z0)*(z-z0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + ( 15*(z-z0)*(z-z0)*(z-z0) / pow(R,7) - 6*(z-z0) / pow(R,5) ) * C
                        + (z-z0)*(z-z0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        - 1 / pow(R,3) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        - 3*(z-z0) / pow(R,5) * C
                        + (z-z0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (z-z0)*E
                     );
                  up[3*ind+2] +=
                     // m_xy*G_zy,x
                     + m0*mxy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (y-y0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        + 3*(y-y0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C
                     );
                  up[3*ind+2] +=
                     // m_xz**G_zz,x
                     + m0*mxz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(z-z0)*(z-z0) / pow(R,5) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        + 15*(x-x0)*(z-z0)*(z-z0) / pow(R,7) * C
                        + (z-z0)*(z-z0) / pow(R,3)* ((x-x0)*D - (x-x0)*E)
                        - 1 / pow(R,3) * ((x-x0)*A/pow(alpha,2) - (x-x0)*B/pow(beta,2))
                        - 3*(x-x0) / pow(R,5) * C
                        + (x-x0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (x-x0)*E
                     );
                  up[3*ind+2] +=
                     // m_xy*G_xz,y
                     + m0*mxy/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(y-y0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        + 3*(x-x0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + 15*(x-x0)*(y-y0)*(z-z0) / pow(R,7) * C
                     );
                  up[3*ind+2] +=
                     // m_yz*G_zz,y
                     + m0*myz/(4*M_PI*rho)*
                     (
                        + 3*(y-y0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        + 3*(z-z0)*(z-z0) / pow(R,5) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        + 15*(y-y0)*(z-z0)*(z-z0) / pow(R,7) * C
                        + (z-z0)*(z-z0) / pow(R,3)* ((y-y0)*D - (y-y0)*E)
                        - 1 / pow(R,3) * ((y-y0)*A/pow(alpha,2) - (y-y0)*B/pow(beta,2))
                        - 3*(y-y0) / pow(R,5) * C
                        + (y-y0) / (pow(R,3)*pow(beta,2)) * B
                        + 1 / R * (y-y0)*E
                     );
                  up[3*ind+2] +=
                     // m_xz*G_xz,z
                     + m0*mxz/(4*M_PI*rho)*
                     (
                        + 3*(x-x0)*(z-z0)*(z-z0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (x-x0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (x-x0)*(z-z0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        + 3*(x-x0)*(z-z0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + ( 15*(x-x0)*(z-z0)*(z-z0) / pow(R,7) - 3*(x-x0) / pow(R,5) ) * C
                     );
                  up[3*ind+2] +=
                     // m_yz*G_yz,z
                     + m0*myz/(4*M_PI*rho)*
                     (
                        + 3*(z-z0)*(z-z0)*(y-y0) / pow(R,5) * (A/pow(alpha,2) - B/pow(beta,2))
                        - (y-y0) / pow(R,3) * (A/pow(alpha,2) - B/pow(beta,2))
                        + (z-z0)*(y-y0) / pow(R,3)* ((z-z0)*D - (z-z0)*E)
                        + 3*(z-z0)*(y-y0) / pow(R,5) * ((z-z0)*A/pow(alpha,2) - (z-z0)*B/pow(beta,2))
                        + ( 15*(z-z0)*(z-z0)*(y-y0) / pow(R,7) - 3*(y-y0) / pow(R,5) ) * C
                     );
               }
            }
            ind++;
         }
}
//-----------------------------------------------------------------------
// Compute error norms of the difference between an exact solution a_Uex and a
// computed solution a_U over all grids, excluding supergrid damping layers.
//   diffInf - max-norm of the difference (global, all ranks)
//   diffL2  - L2-norm of the difference (global)
//   xInf    - max-norm of the exact solution (global)
// For the point-source test, a sphere of radius 4h around the source is also
// excluded (handled inside solerr3fort).
void EW::normOfDifference( vector<Sarray> & a_Uex, vector<Sarray> & a_U, float_sw4 &diffInf,
			   float_sw4 &diffL2, float_sw4 &xInf, vector<Source*>& a_globalSources )
{
   float_sw4 linfLocal=0, l2Local=0, diffInfLocal=0, diffL2Local=0;
   float_sw4 xInfLocal=0, xInfGrid=0;
   // Grid spacings of the finest (top) and coarsest (bottom) grids; the
   // supergrid layer width scales with these.
   float_sw4 htop = mGridSize[mNumberOfGrids-1];
   float_sw4 hbot = mGridSize[0];
   for(int g=0 ; g<mNumberOfGrids; g++ )
   {
      float_sw4 radius =-1, x0=0, y0=0, z0=0;
      float_sw4 h = mGridSize[g];
      // Supergrid thickness in grid points of grid g (rounded to nearest int).
      int nsgxy = (int)(0.5+m_sg_gp_thickness*htop/h);
      int nsgz  = (int)(0.5+m_sg_gp_thickness*hbot/h);
      int imin, imax, jmin, jmax, kmin, kmax;
      // Remove supergrid layers
      if (mbcGlobalType[0] == bSuperGrid)
	 imin = max(m_iStartInt[g], nsgxy+1);
      else
	 imin = m_iStartInt[g];
      if (mbcGlobalType[1] == bSuperGrid)
	 imax = min(m_iEndInt[g], m_global_nx[g] - nsgxy);
      else
	 imax = m_iEndInt[g];
      if (mbcGlobalType[2] == bSuperGrid)
	 jmin = max(m_jStartInt[g], nsgxy+1);
      else
	 jmin = m_jStartInt[g];
      if (mbcGlobalType[3] == bSuperGrid)
	 jmax = min(m_jEndInt[g], m_global_ny[g] - nsgxy);
      else
	 jmax = m_jEndInt[g];
      // Can not test on global type when there is more than one grid in the z-direction
      // if uppermost grid has layer on top boundary, the fine grid spacing is used for the s.g. layer width
      if (m_bcType[g][4] == bSuperGrid)
	 kmin = max(m_kStartInt[g], nsgxy+1);
      else
	 kmin = m_kStartInt[g];
      // The lowermost grid has the s.g. layer width based on the spacing of the coarsest grid
      if (m_bcType[g][5] == bSuperGrid)
	 kmax = min(m_kEndInt[g], m_global_nz[g] - nsgz);
      else
	 kmax = m_kEndInt[g];
      if( m_point_source_test )
      {
	 // Exclude a small sphere around the singular point source.
	 radius = 4*h;
	 x0 = a_globalSources[0]->getX0();
	 y0 = a_globalSources[0]->getY0();
	 z0 = a_globalSources[0]->getZ0();
      }
      solerr3fort( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
		   h, a_Uex[g].c_ptr(), a_U[g].c_ptr(), linfLocal, l2Local, xInfGrid,
		   m_zmin[g], x0, y0, z0, radius, imin, imax, jmin, jmax, kmin, kmax );
      // Max-norms: take running max over grids; L2: accumulate the squared sums.
      if (linfLocal > diffInfLocal) diffInfLocal = linfLocal;
      if (xInfGrid > xInfLocal) xInfLocal = xInfGrid;
      diffL2Local += l2Local;
   }
   // communicate local results for global errors
   MPI_Allreduce( &diffInfLocal, &diffInf, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator );
   MPI_Allreduce( &xInfLocal, &xInf, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator );
   MPI_Allreduce( &diffL2Local, &diffL2, 1, m_mpifloat, MPI_SUM, m_cartesian_communicator );
   diffL2 = sqrt(diffL2);
}
//-----------------------------------------------------------------------
void EW::check_dimensions()
{
for( int g= 0 ; g < mNumberOfGrids ; g++ )
{
int nz=m_kEndInt[g]-m_kStartInt[g]+1;
int nzmin;
if( m_onesided[g][4] && m_onesided[g][5] )
nzmin = 12;
else if( m_onesided[g][4] || m_onesided[g][5] )
nzmin = 8;
else
nzmin = 1;
REQUIRE2( nz >= nzmin, "The number of grid points (not counting ghost pts) in the z-direction in grid " << g <<
" must be >= " << nzmin << " current value is " << nz );
int nx = m_iEndInt[g]-m_iStartInt[g]+1;
REQUIRE2( nx >= 1, "No grid points left (not counting ghost pts) in the x-direction in grid " << g );
int ny = m_jEndInt[g]-m_jStartInt[g]+1;
REQUIRE2( ny >= 1, "No grid points left (not counting ghost pts) in the y-direction in grid " << g );
}
}
//-----------------------------------------------------------------------
void EW::setup_supergrid( )
{
if (mVerbose >= 3 && m_myrank == 0 )
cout << "*** Inside setup_supergrid ***" << endl;
// check to see if there are any supergrid boundary conditions
m_use_supergrid = false;
for( int side=0 ; side < 6 ; side++ )
if( mbcGlobalType[side] == bSuperGrid )
m_use_supergrid = true;
if (mVerbose && m_myrank == 0 && m_use_supergrid)
cout << "Detected at least one boundary with supergrid conditions" << endl;
int gTop = mNumberOfCartesianGrids-1;
int gBot = 0;
m_supergrid_taper_z.resize(mNumberOfGrids);
m_supergrid_taper_x.define_taper( (mbcGlobalType[0] == bSuperGrid), 0.0, (mbcGlobalType[1] == bSuperGrid),
m_global_xmax, m_sg_gp_thickness*mGridSize[gTop] );
m_supergrid_taper_y.define_taper( (mbcGlobalType[2] == bSuperGrid), 0.0, (mbcGlobalType[3] == bSuperGrid),
m_global_ymax, m_sg_gp_thickness*mGridSize[gTop] );
if( mNumberOfGrids == 1 )
m_supergrid_taper_z[0].define_taper( !m_topography_exists && (mbcGlobalType[4] == bSuperGrid), 0.0,
(mbcGlobalType[5] == bSuperGrid), m_global_zmax,
m_sg_gp_thickness*mGridSize[gBot] );
else
{
m_supergrid_taper_z[mNumberOfGrids-1].define_taper( !m_topography_exists && (mbcGlobalType[4] == bSuperGrid),
0.0, false, m_global_zmax,
m_sg_gp_thickness*mGridSize[gTop] );
m_supergrid_taper_z[0].define_taper( false, 0.0, mbcGlobalType[5]==bSuperGrid, m_global_zmax,
m_sg_gp_thickness*mGridSize[gBot] );
for( int g=1 ; g < mNumberOfGrids-1 ; g++ )
m_supergrid_taper_z[g].define_taper( false, 0.0, false, 0.0, m_sg_gp_thickness*mGridSize[gBot] );
}
}
//-----------------------------------------------------------------------
// Fill the per-grid 1-D supergrid arrays: damping coefficients (dcx/dcy/dcz),
// stretching functions (strx/stry/strz) and corner tapers, by evaluating the
// taper objects at each grid line. When supergrid is disabled the arrays are
// still defined (damping 0, stretching 1) to simplify the solver kernels.
// The result is copied to the device at the end.
void EW::assign_supergrid_damping_arrays()
{
   int i, j, k, topCartesian;
   float_sw4 x, y, z;
// Local accessor macros: index the raw per-grid arrays with global (i,j,k)
// indices by subtracting the local start offset. Undefined again at the end.
#define dcx(i,g) (m_sg_dc_x[g])[i-m_iStart[g]]
#define dcy(j,g) (m_sg_dc_y[g])[j-m_jStart[g]]
#define dcz(k,g) (m_sg_dc_z[g])[k-m_kStart[g]]
#define strx(i,g) (m_sg_str_x[g])[i-m_iStart[g]]
#define stry(j,g) (m_sg_str_y[g])[j-m_jStart[g]]
#define strz(k,g) (m_sg_str_z[g])[k-m_kStart[g]]
#define cornerx(i,g) (m_sg_corner_x[g])[i-m_iStart[g]]
#define cornery(j,g) (m_sg_corner_y[g])[j-m_jStart[g]]
#define cornerz(k,g) (m_sg_corner_z[g])[k-m_kStart[g]]
//   topCartesian = mNumberOfCartesianGrids-1;
// Note: compared to WPP2, we don't need to center the damping coefficients on the half-point anymore,
// because the damping term is now 4th order: D+D-( a(x) D+D- ut(x) )
   topCartesian = mNumberOfCartesianGrids-1;
   if( m_use_supergrid )
   {
      for( int g=0 ; g<mNumberOfGrids; g++)
      {
	 // Horizontal directions: evaluate the x- and y-tapers at the physical
	 // coordinate of each grid line (1-based index convention).
	 for( i = m_iStart[g] ; i <= m_iEnd[g] ; i++ )
	 {
	    x = (i-1)*mGridSize[g];
	    dcx(i,g) = m_supergrid_taper_x.dampingCoeff(x);
	    strx(i,g) = m_supergrid_taper_x.stretching(x);
	    cornerx(i,g) = m_supergrid_taper_x.cornerTaper(x);
	 }
	 for( j = m_jStart[g] ; j <= m_jEnd[g] ; j++ )
	 {
	    y = (j-1)*mGridSize[g];
	    dcy(j,g) = m_supergrid_taper_y.dampingCoeff(y);
	    stry(j,g) = m_supergrid_taper_y.stretching(y);
	    cornery(j,g) = m_supergrid_taper_y.cornerTaper(y);
	 }
	 if (g > topCartesian || (0 < g && g < mNumberOfGrids-1) ) // Curvilinear or refinement grid
	 {
	    // No supergrid damping in the vertical (k-) direction on a curvilinear or refinement grid.
	    for( k = m_kStart[g] ; k <= m_kEnd[g] ; k++ )
	    {
	       dcz(k,g) = 0.;
	       strz(k,g) = 1;
	       cornerz(k,g) = 1.;
	    }
	 }
	 else
	 {
	    // Vertical taper evaluated at z = m_zmin + (k-1)*h for this grid.
	    for( k = m_kStart[g] ; k <= m_kEnd[g] ; k++ )
	    {
	       z = m_zmin[g] + (k-1)*mGridSize[g];
	       dcz(k,g) = m_supergrid_taper_z[g].dampingCoeff(z);
	       strz(k,g) = m_supergrid_taper_z[g].stretching(z);
	       cornerz(k,g) = m_supergrid_taper_z[g].cornerTaper(z);
	    }
	 }
      } // end for g...
   } // end if m_use_supergrid
   else //
   {
      // Supergrid not used, but define arrays to simplify coding in some places.
      for( int g=0 ; g < mNumberOfGrids ; g++ )
      {
	 for( i = m_iStart[g] ; i <= m_iEnd[g] ; i++ )
	 {
	    dcx(i,g) = 0;
	    strx(i,g) = 1;
	    cornerx(i,g) = 1.;
	 }
	 for( j = m_jStart[g] ; j <= m_jEnd[g] ; j++ )
	 {
	    dcy(j,g) = 0;
	    stry(j,g) = 1;
	    cornery(j,g) = 1.;
	 }
	 for( k = m_kStart[g] ; k <= m_kEnd[g] ; k++ )
	 {
	    dcz(k,g) = 0.;
	    strz(k,g) = 1;
	    cornerz(k,g) = 1.;
	 }
      }
   }
   copy_supergrid_arrays_to_device();
#undef dcx
#undef dcy
#undef dcz
#undef strx
#undef stry
#undef strz
#undef cornerx
#undef cornery
#undef cornerz
}
//-----------------------------------------------------------------------
void EW::assign_local_bcs( )
{
   // This routine assigns m_bcType[g][b], b=0,1,2,3, based on mbcGlobalType, taking parallel overlap boundaries into account
   int top=mNumberOfGrids-1; // index of the top grid in the arrays m_iStart, m_iEnd, etc
   // horizontal bc's are the same for all grids
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
   {
      // start by copying the global bc's
      for (int b=0; b<=3; b++)
	 m_bcType[g][b] = mbcGlobalType[b];
      // NOTE(review): the overlap tests below use the 'top' grid's index ranges
      // for every g — this relies on all grids sharing the same horizontal
      // parallel decomposition (see comment above); confirm if grids can differ.
      if (m_iStart[top]+m_ghost_points > 1)
      {
	 m_bcType[g][0] = bProcessor;
      }
      if (m_iEnd[top]-m_ghost_points < m_global_nx[top])
      {
	 m_bcType[g][1] = bProcessor;
      }
      if (m_jStart[top]+m_ghost_points > 1)
      {
	 m_bcType[g][2] = bProcessor;
      }
      if (m_jEnd[top]-m_ghost_points < m_global_ny[top])
      {
	 m_bcType[g][3] = bProcessor;
      }
   }
   // vertical bc's are interpolating except at the bottom and the top, where they equal the global conditions
   // ( Only preliminary support for acoustic/elastic, not fully implemented)
   m_bcType[top][4] = mbcGlobalType[4];
   // Upper (k=min) interface type of each grid below the top, chosen from the
   // curvilinear/Cartesian nature of the grid pair. The four cases are mutually
   // exclusive, so exactly one assignment fires per grid.
   for( int g = 0 ; g < mNumberOfGrids-1 ; g++ )
   {
      if( m_is_curvilinear[g+1] && !m_is_curvilinear[g] ) // Elastic case only
	 m_bcType[g][4] = bCCInterface;
      if( !m_is_curvilinear[g+1] && !m_is_curvilinear[g] ) // Two Cartesian grids, must be refinement bndry.
	 m_bcType[g][4] = bRefInterface;
      if( !m_is_curvilinear[g+1] && m_is_curvilinear[g] ) // Acoustic case only
	 m_bcType[g][4] = bCCInterface;
      if( m_is_curvilinear[g+1] && m_is_curvilinear[g] ) // Acoustic/Elastic interface
	 m_bcType[g][4] = bAEInterface;
   }
   m_bcType[0][5] = mbcGlobalType[5];
   // Lower (k=max) interface type of each grid above the bottom, mirroring the
   // logic above with the grid below.
   for( int g = 1 ; g < mNumberOfGrids ; g++ )
   {
      if( m_is_curvilinear[g] && !m_is_curvilinear[g-1] ) // Elastic case
	 m_bcType[g][5] = bCCInterface;
      if( !m_is_curvilinear[g] && !m_is_curvilinear[g-1] ) // Two Cartesian grids, must be refinement bndry.
	 m_bcType[g][5] = bRefInterface;
      if( !m_is_curvilinear[g] && m_is_curvilinear[g-1] ) // Acoustic case
	 m_bcType[g][5] = bCCInterface;
      if( m_is_curvilinear[g] && m_is_curvilinear[g-1] ) // Acoustic/Elastic interface
	 m_bcType[g][5] = bAEInterface;
   }
   // Find out which boundaries need one sided approximation in mixed derivatives
   for( int g= 0 ; g < mNumberOfGrids ; g++ )
      for(int side=4 ; side < 6 ; side++ )
	 m_onesided[g][side] = (m_bcType[g][side] == bStressFree) ||
	    (m_bcType[g][side] == bRefInterface) || (m_bcType[g][side] == bAEInterface);
}
//-----------------------------------------------------------------------
// Create the output directory mPath. Rank 0 performs the creation (aborting the
// job on failure or missing write permission); afterwards every rank verifies
// that the directory exists and is writable from its node.
void EW::create_output_directory( )
{
   if (m_myrank == 0 )
   {
      cout << "----------------------------------------------------" << endl
	   << " Making Output Directory: " << mPath << endl
	   << "\t\t" << endl;
      // Create directory where all these files will be written.
      if (mkdirs(mPath) != 0)
      {
	 // fatal error
	 cerr << endl << "******** Failed to create the output directory *******" << endl << endl;
	 MPI_Abort(MPI_COMM_WORLD,1);
      }
      cout << "... Done!" << endl
	   << "----------------------------------------------------" << endl;
      // check that we have write permission on the directory
      if (access(mPath.c_str(),W_OK)!=0)
      {
	 // fatal error
	 cerr << endl << "Error: No write permission on output directory: " << mPath << endl;
	 MPI_Abort(MPI_COMM_WORLD,1);
      }
   }
   // Let processor 0 finish first!
   cout.flush(); cerr.flush();
   MPI_Barrier(MPI_COMM_WORLD);
   // Check that the mPath directory exists from all processes
   struct stat statBuf;
   int statErr = stat(mPath.c_str(), &statBuf);
   CHECK_INPUT(statErr == 0 && S_ISDIR(statBuf.st_mode), "Error: " << mPath << " is not a directory" << endl);
   // check that all processes have write permission on the directory
   CHECK_INPUT(access(mPath.c_str(),W_OK)==0,
	       "Error: No write permission on output directory: " << mPath << endl);
}
//-----------------------------------------------------------------------
int EW::mkdirs(const string& path)
{
// string pathTemp(path.begin(), path.end());
string pathTemp = path;
//-----------------------------------------------------------------
// Recursively call stat and then mkdir on each sub-directory in 'path'
//-----------------------------------------------------------------
string sep = "/";
char * pathtemparg = new char[pathTemp.length()+1];
strcpy(pathtemparg,pathTemp.c_str());
char* token = strtok( pathtemparg, sep.c_str() );
// char* token = strtok(const_cast<char*>(pathTemp.c_str()), sep.c_str());
stringstream pathsofar;
// for checking the status:
struct stat statBuf;
int statErr;
// If there's a leading slash, put it back on...
if (strncmp(pathTemp.c_str(), sep.c_str(), 1) == 0) pathsofar << sep;
while (token != NULL)
{
pathsofar << token << sep;
// test: check the status of the path so far...
// cout << "Calling stat() on path: " << pathsofar.str() << endl;
statErr = stat(pathsofar.str().c_str(), &statBuf);
if (statErr == 0)
{
// cout << "stat() returned successfully." << endl;
if ( S_ISDIR(statBuf.st_mode) )
{
// cout << "stat() says: '" << pathsofar.str() << "' is a directory." << endl;
// it already exists, this is okay, let's get the next directory in the string and skip to the while statement
token = strtok(NULL, sep.c_str());
continue;
}
else
{
cerr << "stat() says: '" << pathsofar.str() << "' is not a directory." << endl;
// real error, let's bail...
delete[] pathtemparg;
return -1;
}
}
else
{
// cerr << "stat() returned an error code." << endl;
if (errno == EACCES)
{
cerr << "Error: **Search permission is denied for one of the directories in the path prefix of " << pathsofar.str() << endl;
delete[] pathtemparg;
return -1;
}
else if (errno == ENOTDIR)
{
cerr << "Error: **A component of the path '" << pathsofar.str() << "' is not a directory. " << endl;
delete[] pathtemparg;
return -1;
}
else if (errno == ENOENT)
{
// this means that we need to call mkdir to create the directory
if (mVerbose >=2)
cout << "Info: **stat returned ENOENT (the path does not exist, or the path " << endl
<< " is an empty string) " << pathsofar.str() << endl;
}
else
{
if (mVerbose >=2)
cout << "Info: **stat returned other error code for path: " << pathsofar.str() << endl;
}
}
// if we got this far, then 'pathsofar' does not exists
// tmp
if (mVerbose >=2) cout << "Calling mkdir() on path: " << pathsofar.str() << endl;
// old code for recursively making the output directory
if (mkdir(pathsofar.str().c_str(),
S_IWUSR | S_IXUSR | S_IRUSR | S_IRGRP | S_IXGRP ) // why do we need group permissions?
== -1)
{
if (mVerbose >=2) cout << "mkdir() returned an error code." << endl;
// check error conditions
if (errno == EEXIST)
{
// can this ever happen since we called stat(), which said that the directory did not exist ???
if (mVerbose >=2) cout << "Info: ** The directory already exists:" << pathsofar.str() << endl;
// it already exists, this is okay!
token = strtok(NULL, sep.c_str());
continue;
}
else if (errno == EACCES)
cerr << "Error: **Write permission is denied for the parent directory in which the new directory is to be added." << pathsofar.str() << endl;
else if (errno == EMLINK)
cerr << "Error: **The parent directory has too many links (entries)." <<
pathsofar.str() << endl;
else if (errno == ENOSPC)
cerr << "Error: **The file system doesn't have enough room to create the new directory." <<
pathsofar.str() << endl;
else if (errno == EROFS)
cerr << "Error: ** The parent directory of the directory being created is on a read-only file system and cannot be modified." << pathsofar.str() << endl;
else if (errno == ENOSPC)
cerr << "Error: ** The new directory cannot be created because the user's disk quota is exhausted." << pathsofar.str() << endl;
// real error, let's bail...
delete[] pathtemparg;
return -1;
}
else
{
if (mVerbose >=2) cout << "mkdir() returned successfully." << endl;
// are there more directories to be made?
token = strtok(NULL, sep.c_str());
}
}
delete[] pathtemparg;
return 0;
}
//-----------------------------------------------------------------------
void EW::computeDT()
{
   // Compute the global time step mDt from the CFL condition.
   // For each Cartesian grid the largest local wave-speed eigenvalue
   // (4*mu+lambda)/rho is found and dt = mCFL*h/sqrt(eigmax) is minimized
   // over grids. On the curvilinear grid (if topography exists) the
   // eigenvalue comes from a 3x3 symmetric matrix built from the metric
   // terms, diagonalized with LAPACK dspev. The local minimum is then
   // reduced with MPI_MIN, and, if a goal time is set, dt is adjusted so
   // that an integer number of steps reaches mTmax.
   if (!mQuiet && mVerbose >= 1 && m_myrank == 0 )
      printf("*** computing the time step ***\n");
   float_sw4 dtloc=1.e10;
   for (int g=0; g<mNumberOfCartesianGrids; g++)
   {
      float_sw4 eigmax = -1;
      // Scan all points (including ghost points) for the largest eigenvalue.
      for (int k=m_kStart[g]; k<=m_kEnd[g]; k++)
	 for (int j=m_jStart[g]; j<=m_jEnd[g]; j++)
	    for (int i=m_iStart[g]; i<=m_iEnd[g]; i++)
	    {
	       float_sw4 loceig = (4*mMu[g](i,j,k) + mLambda[g](i,j,k) )/mRho[g](i,j,k);
	       eigmax = loceig > eigmax ? loceig:eigmax;
// 	    dtGP = mCFL*mGridSize[g]/sqrt( loceig );
// 	    dtloc = dtloc < dtGP ? dtloc : dtGP;
	    }
      float_sw4 ieigmax = 1/sqrt(eigmax);
      dtloc = dtloc < mCFL*mGridSize[g]*ieigmax ? dtloc : mCFL*mGridSize[g]*ieigmax;
   }
   if( m_topography_exists )
   {
#define SQR(x) (x)*(x)
      // Curvilinear grid
      float_sw4 dtCurv;
      int g = mNumberOfGrids-1;
      float_sw4 la, mu, la2mu;
      int N=3, LDZ=1, INFO=0;
      char JOBZ='N', UPLO='L';       // eigenvalues only, lower-triangular packed storage
      float_sw4 eigmax = -1;
      // always use double precision version of lapack routine, for simplicity
      double Amat[6], W[3], Z[1], WORK[9];
      // do consider ghost points (especially the ghost line above the topography might be important)
      for (int k=m_kStart[g]; k<=m_kEnd[g]; k++)
	 for (int j=m_jStart[g]; j<=m_jEnd[g]; j++)
	    for (int i=m_iStart[g]; i<=m_iEnd[g]; i++)
	    {
	       la = mLambda[g](i,j,k);
	       mu = mMu[g](i,j,k);
// 	       for( int a = 0 ; a < m_number_mechanisms ; a++ )
// 	       {
// 		  la += mLambdaVE[g][a](i,j,k);
// 		  mu += mMuVE[g][a](i,j,k);
// 	       }
	       la2mu = la + 2.*mu;
	       float_sw4 jinv = 1/mJ(i,j,k);
	       // Packed lower triangle of the symmetric 3x3 matrix A (column order).
	       // NOTE(review): mMetric(1,...) appears twice inside A11 (and
	       // similarly in A22/A33); confirm against the intended symbol
	       // matrix that this is not meant to be a different metric index.
	       // A11
	       Amat[0] = -4*(SQR(mMetric(1,i,j,k))*la2mu + SQR(mMetric(1,i,j,k))*mu +
			   SQR(mMetric(2,i,j,k))*la2mu + SQR(mMetric(3,i,j,k))*mu + SQR(mMetric(4,i,j,k))*mu)*jinv;
	       // A21 = A12
	       Amat[1] = -4.*mMetric(2,i,j,k)*mMetric(3,i,j,k)*(mu+la)*jinv;
	       // A31 = A13
	       Amat[2] = -4.*mMetric(2,i,j,k)*mMetric(4,i,j,k)*(mu+la)*jinv;
	       // A22
	       Amat[3] = -4.*(SQR(mMetric(1,i,j,k))*mu + SQR(mMetric(1,i,j,k))*la2mu +
			    + SQR(mMetric(2,i,j,k))*mu + SQR(mMetric(3,i,j,k))*la2mu + SQR(mMetric(4,i,j,k))*mu)*jinv;
	       // A32 = A23
	       Amat[4] = -4.*mMetric(3,i,j,k)*mMetric(4,i,j,k)*(mu+la)*jinv;
	       // A33
	       Amat[5] = -4.*(SQR(mMetric(1,i,j,k))*mu + SQR(mMetric(1,i,j,k))*mu
			    + SQR(mMetric(2,i,j,k))*mu + SQR(mMetric(3,i,j,k))*mu + SQR(mMetric(4,i,j,k))*la2mu)*jinv;
	       // calculate eigenvalues of symmetric matrix
//#ifndef SW4_CUDA
	       F77_FUNC(dspev,DSPEV)(JOBZ, UPLO, N, Amat, W, Z, LDZ, WORK, INFO);
//#endif
	       if (INFO != 0)
	       {
		  printf("ERROR: computeDT: dspev returned INFO = %i for grid point (%i, %i, %i)\n", INFO, i, j, k);
		  printf("lambda = %e, mu = %e\n", la, mu);
		  printf("Jacobian = %15.7g \n",mJ(i,j,k));
		  printf("Matrix = \n");
		  printf(" %15.7g %15.7g %15.7g \n",Amat[0],Amat[1],Amat[2]);
		  printf(" %15.7g %15.7g %15.7g \n",Amat[1],Amat[3],Amat[4]);
		  printf(" %15.7g %15.7g %15.7g \n",Amat[2],Amat[4],Amat[5]);
		  MPI_Abort(MPI_COMM_WORLD, 1);
	       }
	       // eigenvalues in ascending order: W[0] < W[1] < W[2]
	       if (W[0] >= 0.)
	       {
		  printf("ERROR: computeDT: determining eigenvalue is non-negative; W[0] = %e at curvilinear grid point (%i, %i, %i)\n", W[0], i, j, k);
		  MPI_Abort(MPI_COMM_WORLD, 1);
	       }
	       // Most negative eigenvalue W[0] gives the stability bound.
	       float_sw4 loceig = (-W[0])/(4.*mRho[g](i,j,k));
	       eigmax = loceig > eigmax ? loceig:eigmax;
	    }
      float_sw4 ieigmax = 1/sqrt(eigmax);
      dtCurv = mCFL*ieigmax;
      dtloc = dtloc<dtCurv ? dtloc: dtCurv;
#undef SQR
   } // end if topographyExists()
   mDt = dtloc;
   // compute the global minima
   MPI_Allreduce( &dtloc, &mDt, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator);
   if (!mQuiet && mVerbose >= 1 && m_myrank == 0 )
      cout << " CFL= " << mCFL << " prel. time step=" << mDt << endl;
   if( mTimeIsSet )
   {
      // constrain the dt based on the goal time
      mNumberOfTimeSteps = static_cast<int> ((mTmax - mTstart) / mDt + 0.5);
      mNumberOfTimeSteps = (mNumberOfTimeSteps==0)? 1: mNumberOfTimeSteps;
      // the resulting mDt could be slightly too large, because the numberOfTimeSteps is rounded to the nearest int
      mDt = (mTmax - mTstart) / mNumberOfTimeSteps;
   }
}
//-----------------------------------------------------------------------
void EW::computeNearestGridPoint(int & a_i,
				 int & a_j,
				 int & a_k,
				 int & a_g, // grid on which indices are located
				 float_sw4 a_x,
				 float_sw4 a_y,
				 float_sw4 a_z)
{
   // Find the grid g containing z-coordinate a_z (grids are scanned from
   // g=0 upward; m_zmin[g] decreases with refinement), then round (x,y,z)
   // to the nearest grid point (a_i,a_j,a_k) on that grid. The +1 offsets
   // convert from 0-based coordinates to 1-based grid indices; the extra
   // increments push the index up when the point lies past the cell midpoint.
   bool breakLoop = false;
   for (int g = 0; g < mNumberOfGrids; g++)
   {
      if (a_z > m_zmin[g] || g == mNumberOfGrids-1) // We can not trust zmin for the curvilinear grid, since it doesn't mean anything
      {
	 a_i = (int)floor(a_x/mGridSize[g])+1;
	 if (a_x-((a_i-0.5)*mGridSize[g]) > 0.) (a_i)++;
	 a_j = (int)floor(a_y/mGridSize[g])+1;
	 if (a_y-((a_j-0.5)*mGridSize[g]) > 0.) (a_j)++;
	 a_k = (int)floor((a_z-m_zmin[g])/mGridSize[g])+1; //Note: this component will be garbage for g=curvilinear grid
	 if (a_z-(m_zmin[g]+(a_k-0.5)*mGridSize[g]) > 0.) (a_k)++;
	 a_g = g ;
	 breakLoop = true;
      }
      else if (a_z == m_zmin[g]) // testing for equality between doubles is kind of pointless...
      {
	 // Point is located on top surface if g=finest grid, else the location is on
	 // a grid/grid interface, and point is flagged as located on the finer (upper) grid.
	 if (g == mNumberOfGrids-1)
	 {
	    a_i = (int)floor(a_x/mGridSize[g])+1;
	    if (a_x-((a_i-0.5)*mGridSize[g]) > 0.) (a_i)++;
	    a_j = (int)floor(a_y/mGridSize[g])+1;
	    if (a_y-((a_j-0.5)*mGridSize[g]) > 0.) (a_j)++;
	    a_k = 1;
	    a_g = g;
	 }
	 else
	 {
	    a_i = (int)floor(a_x/mGridSize[g+1])+1;
	    if (a_x-((a_i-0.5)*mGridSize[g+1]) > 0.) (a_i)++;
	    a_j = (int)floor(a_y/mGridSize[g+1])+1;
	    if (a_y-((a_j-0.5)*mGridSize[g+1]) > 0.) (a_j)++;
	    a_k = (int)floor((a_z-m_zmin[g+1])/mGridSize[g+1])+1; // Here, I know I am on a grid line
	    a_g = g+1 ;
	 }
	 breakLoop = true;
      }
      if (breakLoop)
      {
	 break;
      }
   }
   // if z > zmax in grid 0 because the coordinate has not yet been corrected for topography, we simply set a_k to m_kEnd
   if (m_topography_exists && a_z >= m_global_zmax)
   {
      a_k = m_kEnd[0];
      a_g = 0;
   }
   // Sanity-check the resulting indices on Cartesian grids (on the
   // curvilinear grid a_k may be garbage; the checks are skipped there).
   if (!m_topography_exists || (m_topography_exists && a_g < mNumberOfCartesianGrids))
   {
      VERIFY2(a_i >= 1-m_ghost_points && a_i <= m_global_nx[a_g]+m_ghost_points,
	      "Grid Error: i (" << a_i << ") is out of bounds: ( " << 1 << ","
	      << m_global_nx[a_g] << ")" << " x,y,z = " << a_x << " " << a_y << " " << a_z);
      VERIFY2(a_j >= 1-m_ghost_points && a_j <= m_global_ny[a_g]+m_ghost_points,
	      "Grid Error: j (" << a_j << ") is out of bounds: ( " << 1 << ","
	      << m_global_ny[a_g] << ")" << " x,y,z = " << a_x << " " << a_y << " " << a_z);
      VERIFY2(a_k >= m_kStart[a_g] && a_k <= m_kEnd[a_g],
	      "Grid Error: k (" << a_k << ") is out of bounds: ( " << 1 << ","
	      << m_kEnd[a_g]-m_ghost_points << ")" << " x,y,z = " << a_x << " " << a_y << " " << a_z);
   }
}
//-----------------------------------------------------------------------
bool EW::interior_point_in_proc(int a_i, int a_j, int a_g)
{
   // Does grid point (a_i,a_j) on grid a_g belong to the interior index
   // range owned by this processor? Parallel ghost points are NOT counted.
   if( a_g < 0 || a_g >= mNumberOfGrids )
      return false;
   bool inI = (a_i >= m_iStartInt[a_g]) && (a_i <= m_iEndInt[a_g]);
   bool inJ = (a_j >= m_jStartInt[a_g]) && (a_j <= m_jEndInt[a_g]);
   return inI && inJ;
}
//-----------------------------------------------------------------------
bool EW::point_in_proc(int a_i, int a_j, int a_g)
{
   // Does grid point (a_i,a_j) on grid a_g belong to this processor,
   // INCLUDING the parallel ghost points?
   if( a_g < 0 || a_g >= mNumberOfGrids )
      return false;
   bool inI = (a_i >= m_iStart[a_g]) && (a_i <= m_iEnd[a_g]);
   bool inJ = (a_j >= m_jStart[a_g]) && (a_j <= m_jEnd[a_g]);
   return inI && inJ;
}
//-----------------------------------------------------------------------
bool EW::point_in_proc_ext(int a_i, int a_j, int a_g)
{
   // Does grid point (a_i,a_j) on grid a_g belong to this processor,
   // including both parallel ghost points AND the extra ghost-point margin?
   if( a_g < 0 || a_g >= mNumberOfGrids )
      return false;
   int ext = m_ext_ghost_points;
   bool inI = (a_i >= m_iStart[a_g]-ext) && (a_i <= m_iEnd[a_g]+ext);
   bool inJ = (a_j >= m_jStart[a_g]-ext) && (a_j <= m_jEnd[a_g]+ext);
   return inI && inJ;
}
//-----------------------------------------------------------------------
bool EW::is_onesided( int g, int side ) const
{
   // True when one-sided operators are enabled on the given side of grid g
   // (the flag is stored as 0/1 in m_onesided).
   return 1 == m_onesided[g][side];
}
//-----------------------------------------------------------------------
void EW::print_execution_time( double t1, double t2, string msg )
{
   // Print the wall-clock interval t2-t1 on rank 0 as "h hours m minutes
   // s seconds", omitting zero fields and using singular forms for 1.
   if( m_myrank != 0 )
      return;
   double secs = t2 - t1;
   int hours = static_cast<int>(secs/3600.0);
   secs -= hours*3600;
   int minutes = static_cast<int>(secs/60.0);
   secs -= minutes*60;
   cout << " Execution time, " << msg << " ";
   if( hours > 1 )
      cout << hours << " hours ";
   else if( hours > 0 )
      cout << hours << " hour ";
   if( minutes > 1 )
      cout << minutes << " minutes ";
   else if( minutes > 0 )
      cout << minutes << " minute ";
   if( secs > 0 )
      cout << secs << " seconds " ;
   cout << endl;
}
//-----------------------------------------------------------------------
void EW::print_execution_times( double times[8] )
{
   // Gather the 8 per-rank timers onto rank 0, print either the averages
   // or one row per processor, and save the raw timings to
   // '<mPath>timings.bin' as: int nprocs, then 8*nprocs doubles.
   double* time_sums =new double[8*m_nprocs];
   MPI_Gather( times, 8, MPI_DOUBLE, time_sums, 8, MPI_DOUBLE, 0, MPI_COMM_WORLD );
   bool printavgs = true;
   if( m_myrank == 0 )
   {
      // Average each timer column over all processors.
      double avgs[8]={0,0,0,0,0,0,0,0};
      for( int p= 0 ; p < m_nprocs ; p++ )
	 for( int c=0 ; c < 8 ; c++ )
	    avgs[c] += time_sums[8*p+c];
      for( int c=0 ; c < 8 ; c++ )
	 avgs[c] /= m_nprocs;
      cout << "\n----------------------------------------" << endl;
      cout << "          Execution time summary " << endl;
      if( printavgs )
      {
	 cout << "  Total      BC comm    BC phys    Scheme     Supergrid  Forcing "
	      <<endl;
	 cout.setf(ios::left);
	 cout.precision(5);
	 cout.width(11);
	 cout << avgs[7];
	 cout.width(11);
	 cout << avgs[2];
	 cout.width(11);
	 cout << avgs[3];
	 cout.width(11);
	 cout << avgs[1];
	 cout.width(11);
	 cout << avgs[4];
	 cout.width(11);
	 cout << avgs[0];
	 cout.width(11);
      }
      else
      {
	 cout << "Processor  Total      BC comm    BC phys    Scheme     Supergrid  Forcing "
	      <<endl;
	 cout.setf(ios::left);
	 cout.precision(5);
	 for( int p= 0 ; p < m_nprocs ; p++ )
	 {
	    cout.width(11);
	    cout << p;
	    cout.width(11);
	    cout << time_sums[8*p+7];
	    cout.width(11);
	    cout << time_sums[8*p+2];
	    cout.width(11);
	    cout << time_sums[8*p+3];
	    cout.width(11);
	    cout << time_sums[8*p+1];
	    cout.width(11);
	    cout << time_sums[8*p+4];
	    cout.width(11);
	    cout << time_sums[8*p];
	    cout.width(11);
	    cout << endl;
	 }
      }
      cout << "Clock tick is " << MPI_Wtick() << " seconds" << endl;
      cout << "----------------------------------------\n" << endl;
      cout.setf(ios::right);
      cout.precision(6);
      // Save timings to file. BUGFIX: previously the file was written (and
      // closed) even when open() failed, and only 7*m_nprocs doubles were
      // written from a buffer gathered with a stride of 8 per processor,
      // which truncated and misaligned the per-rank data.
      string fname = mPath+"timings.bin";
      int fd=open( fname.c_str(), O_TRUNC|O_CREAT|O_WRONLY, 0660 );
      if( fd == -1 )
	 cout << "Error opening " << fname.c_str() << " for writing execution times" << endl;
      else
      {
	 size_t nr=write(fd,&m_nprocs,sizeof(int));
	 if( nr != sizeof(int) )
	    cout << "Error writing nprocs on " << fname.c_str() << " nr = " << nr << " bytes" << endl;
	 nr = write(fd, time_sums, 8*m_nprocs*sizeof(double));
	 if( nr != 8*m_nprocs*sizeof(double) )
	    cout << "Error writing time_sums on " << fname.c_str() << " nr = " << nr << " bytes" << endl;
	 close(fd);
      }
   }
   delete[] time_sums;
}
//-----------------------------------------------------------------------
bool EW::check_for_match_on_cpu_gpu( vector<Sarray>& a_U, int verbose, string name )
{
   // Compare the CPU and GPU copies of array 'name' on every grid.
   // Returns true if any grid has at least one mismatching entry; with
   // verbose==1 the location of the first mismatch per grid is printed.
   bool mismatch_found = false;
   if( m_cuobj->has_gpu() )
   {
      for( int g=0 ; g<mNumberOfGrids; g++ )
      {
	 size_t nmismatch = a_U[g].check_match_cpu_gpu( m_cuobj, name );
	 if( nmismatch > 0 )
	 {
	    mismatch_found = true;
	    if( verbose == 1 )
	    {
	       int cnan, inan, jnan, knan;
	       a_U[g].check_match_cpu_gpu( m_cuobj, cnan, inan, jnan, knan, name );
	       cout << "grid " << g << " array " << name << " found " << nmismatch
		    << " dismatch. First dismatch at " << cnan << " " << inan
		    << " " << jnan << " " << knan << endl;
	    }
	 }
      }
   }
   return mismatch_found;
}
//-----------------------------------------------------------------------
void EW::setup_materials()
{
   // Fill the material arrays (mRho, mMu, mLambda) from the material
   // blocks, extrapolate the material into ghost regions, and convert the
   // stored wave speeds (cs in mMu, cp in mLambda) to Lame parameters.
   // Point source test sets material directly in processTestPointSource
   if( !m_point_source_test )
   {
      // Undefined q-factors, attenutation not yet implemented
      vector<Sarray> Qs(mNumberOfGrids), Qp(mNumberOfGrids);
      for( int b=0 ; b < m_mtrlblocks.size() ; b++ )
	 m_mtrlblocks[b]->set_material_properties( mRho, mMu, mLambda, Qs, Qp );
      // Here mMu contains cs, and mLambda contains cp
      // Extrapolate above the top surface (finest/curvilinear grid)...
      int g = mNumberOfGrids-1;
      extrapolateInZ( g, mRho[g], true, false );
      extrapolateInZ( g, mLambda[g], true, false );
      extrapolateInZ( g, mMu[g], true, false );
      // ...and below the bottom of the coarsest grid.
      g = 0;
      extrapolateInZ( g, mRho[g], false, true );
      extrapolateInZ( g, mLambda[g], false, true );
      extrapolateInZ( g, mMu[g], false, true );
      // Fill lateral ghost points on all grids.
      extrapolateInXY( mRho );
      extrapolateInXY( mMu );
      extrapolateInXY( mLambda );
      // Convert mMu to mu, and mLambda to lambda
      convert_material_to_mulambda( );
   }
}
//-----------------------------------------------------------------------
void EW::convert_material_to_mulambda( )
{
for( int g = 0 ; g < mNumberOfGrids; g++)
{
// On input, we have stored cs in MU, cp in Lambda
// use mu = rho*cs*cs and lambda = rho*cp*cp - 2*mu
for( int k = m_kStart[g] ; k <= m_kEnd[g]; k++ )
{
for( int j = m_jStart[g] ; j <= m_jEnd[g]; j++ )
{
for( int i = m_iStart[g] ; i <= m_iEnd[g] ; i++ )
{
mMu[g](i,j,k) = mRho[g](i,j,k)*mMu[g](i,j,k)*mMu[g](i,j,k);
mLambda[g](i,j,k) = mRho[g](i,j,k)*mLambda[g](i,j,k)*mLambda[g](i,j,k)-2*mMu[g](i,j,k);
}
}
}
}
}
//-----------------------------------------------------------------------
void EW::extrapolateInXY( vector<Sarray>& field )
{
   // Fill lateral (i and j) ghost points that still hold the "undefined"
   // marker -1 by copying the nearest interior boundary value. Only done
   // on processors that own a physical domain boundary.
   for( int g= 0; g < mNumberOfGrids ; g++ )
   {
      // Low-i side of the global domain.
      if( m_iStartInt[g] == 1 )
	 for( int kk=m_kStart[g] ; kk <= m_kEnd[g] ; kk++ )
	    for( int jj=m_jStart[g] ; jj <= m_jEnd[g] ; jj++ )
	       for( int ii=m_iStart[g] ; ii < 1 ; ii++ )
		  if( field[g](ii,jj,kk) == -1 )
		     field[g](ii,jj,kk) = field[g](1,jj,kk);
      // High-i side of the global domain.
      if( m_iEndInt[g] == m_global_nx[g] )
	 for( int kk=m_kStart[g] ; kk <= m_kEnd[g] ; kk++ )
	    for( int jj=m_jStart[g] ; jj <= m_jEnd[g] ; jj++ )
	       for( int ii=m_iEndInt[g]+1 ; ii <= m_iEnd[g] ; ii++ )
		  if( field[g](ii,jj,kk) == -1 )
		     field[g](ii,jj,kk) = field[g](m_iEndInt[g],jj,kk);
      // Low-j side of the global domain.
      if( m_jStartInt[g] == 1 )
	 for( int kk=m_kStart[g] ; kk <= m_kEnd[g] ; kk++ )
	    for( int jj=m_jStart[g] ; jj < 1 ; jj++ )
	       for( int ii=m_iStart[g] ; ii <= m_iEnd[g] ; ii++ )
		  if( field[g](ii,jj,kk) == -1 )
		     field[g](ii,jj,kk) = field[g](ii,1,kk);
      // High-j side of the global domain.
      if( m_jEndInt[g] == m_global_ny[g] )
	 for( int kk=m_kStart[g] ; kk <= m_kEnd[g] ; kk++ )
	    for( int jj=m_jEndInt[g]+1 ; jj <= m_jEnd[g] ; jj++ )
	       for( int ii=m_iStart[g] ; ii <= m_iEnd[g] ; ii++ )
		  if( field[g](ii,jj,kk) == -1 )
		     field[g](ii,jj,kk) = field[g](ii,m_jEndInt[g],kk);
      // corners not necessary to treat explicitly???
   }
}
//-----------------------------------------------------------------------
void EW::extrapolateInZ( int g, Sarray& field, bool lowk, bool highk )
{
   // Fill vertical ghost points on grid g that still hold the "undefined"
   // marker -1 by copying the nearest interior plane: k=1 for the low-k
   // (top) side, k=m_kEndInt[g] for the high-k (bottom) side.
   if( lowk )
   {
      for( int kk=m_kStart[g] ; kk < 1 ; kk++ )
	 for( int jj=m_jStart[g] ; jj <= m_jEnd[g] ; jj++ )
	    for( int ii=m_iStart[g] ; ii <= m_iEnd[g] ; ii++ )
	       if( field(ii,jj,kk) == -1 )
		  field(ii,jj,kk) = field(ii,jj,1);
   }
   if( highk )
   {
      for( int kk=m_kEndInt[g]+1 ; kk <= m_kEnd[g] ; kk++ )
	 for( int jj=m_jStart[g] ; jj <= m_jEnd[g] ; jj++ )
	    for( int ii=m_iStart[g] ; ii <= m_iEnd[g] ; ii++ )
	       if( field(ii,jj,kk) == -1 )
		  field(ii,jj,kk) = field(ii,jj,m_kEndInt[g]);
   }
}
//-----------------------------------------------------------------------
void EW::getGlobalBoundingBox(float_sw4 bbox[6])
{
   // Return the global domain extents as
   // (xmin, xmax, ymin, ymax, zmin, zmax); x and y start at 0.
   bbox[0] = 0.;             bbox[1] = m_global_xmax;
   bbox[2] = 0.;             bbox[3] = m_global_ymax;
   bbox[4] = m_global_zmin;  bbox[5] = m_global_zmax;
}
//-----------------------------------------------------------------------
bool EW::getDepth( float_sw4 x, float_sw4 y, float_sw4 z, float_sw4 & depth )
{
   // Compute the depth below the free surface at (x,y,z).
   // Without topography the surface is at z=0, so depth is simply z.
   if( !m_topography_exists )
   {
      depth = z;
      return true;
   }
   // With topography, subtract the surface elevation; fails (returns
   // false) when the topography at (x,y) is not owned by this processor.
   float_sw4 ztopo = 0;
   if( !find_topo_zcoord_owner(x,y,ztopo) )
      return false;
   depth = z - ztopo;
   return true;
}
//-----------------------------------------------------------------------
bool EW::interpolate_topography( float_sw4 q, float_sw4 r, float_sw4 & Z0, bool smoothed)
{
// Interpolate the smoothed or raw topography
// Assume that (q,r) are indices in the curvilinear grid.
// if (q,r) is on this processor (need a 2x2 interval in (i,j)-index space:
// Return true and assign Z0 corresponding to (q,r)
// Returns false if
// 1) (q,r) is outside the global parameter domain (expanded by ghost points)
// 2) (q,r) is not on this processor
// NOTE:
// The parameters are normalized such that 1 <= q <= Nx is the full domain (without ghost points),
// 1 <= r <= Ny.
// 0. No topography, easy case:
  if( !topographyExists() )
  {
    Z0 = 0;
    return true;
  }
// 1. Check that the point is inside the domain
  int g = mNumberOfGrids-1;
  float_sw4 h = mGridSize[g];
  float_sw4 qMin = (float_sw4) (1- m_ghost_points);
  float_sw4 qMax = (float_sw4) (m_global_nx[g] + m_ghost_points);
  float_sw4 rMin = (float_sw4) (1- m_ghost_points);
  float_sw4 rMax = (float_sw4) (m_global_ny[g] + m_ghost_points);
  if (!(q >= qMin && q <= qMax && r >= rMin && r <= rMax))
  {
    Z0 = 0;
    return false;
  }
// 2. Compute elevation at (q,r)
  float_sw4 tau; // holds the elevation at (q,r). Recall that elevation=-z
  if (m_analytical_topo)
  {
    // Analytical Gaussian-hill topography.
    float_sw4 X0 = (q-1.0)*h;
    float_sw4 Y0 = (r-1.0)*h;
    float_sw4 igx2 = 1.0/(m_GaussianLx*m_GaussianLx);
    float_sw4 igy2 = 1.0/(m_GaussianLy*m_GaussianLy);
    tau = m_GaussianAmp*exp(-(X0-m_GaussianXc)*(X0-m_GaussianXc)*igx2
			    -(Y0-m_GaussianYc)*(Y0-m_GaussianYc)*igy2 );
  }
  else
  {
// 3.Compute nearest grid point
    int iNear = static_cast<int>(round(q));
    int jNear = static_cast<int>(round(r));
    // BUGFIX: the second comparison was previously misparenthesized as
    // (fabs(iNear-q) < 1.e-9 && fabs(jNear-r)) < 1.e-9, which compares a
    // boolean (0 or 1) against 1e-9 and yields the wrong on-grid test.
    bool smackOnTop = fabs(iNear-q) < 1.e-9 && fabs(jNear-r) < 1.e-9;
    if (smackOnTop && point_in_proc(iNear,jNear,g))
    {
// 3a. (q,r) coincides with a grid point. Get elevation at that point.
      if (smoothed)
	tau = mTopoGridExt(iNear,jNear,1);
      else
	tau = mTopo(iNear,jNear,1);
    }
    else
    {
// 3b. (q,r) not at a grid point. Interpolate to get the elevation.
// Nearest lower grid point:
      int i = static_cast<int>(floor(q));
      int j = static_cast<int>(floor(r));
      // Interpolation uses an 8x8 stencil with offsets -3..+4 around (i,j),
      // so the full stencil must be available on this processor.
      if( point_in_proc_ext(i-3,j-3,g) && point_in_proc_ext(i+4,j+4,g) )
      {
	 float_sw4 a6cofi[8], a6cofj[8];
	 gettopowgh( q-i, a6cofi );
	 gettopowgh( r-j, a6cofj );
	 tau = 0;
	 for( int l=j-3 ; l <= j+4 ; l++ )
	    for( int k=i-3 ; k <= i+4 ; k++ )
	       tau += a6cofi[k-i+3]*a6cofj[l-j+3]*mTopoGridExt(k,l,1);
      }
      else
      {
	 Z0 = 0;
	 return false;
      }
    }
  }
  // Convert elevation to z-coordinate (z points downward).
  Z0 = -tau;
  return true;
}
//-----------------------------------------------------------------------
void EW::gettopowgh( float_sw4 ai, float_sw4 wgh[8] ) const
{
   // Compute the 8 interpolation weights wgh[0..7] for the topography
   // interpolation stencil (grid offsets -3..+4), where ai is the
   // fractional distance from the lower grid point.
   // NOTE(review): the polynomial coefficients presumably realize a
   // high-order (6th/7th) interpolant; taken as given, do not rearrange —
   // floating-point evaluation order affects the result.
   float_sw4 pol = ai*ai*ai*ai*ai*ai*ai*(-251+135*ai+25*ai*ai-
					 33*ai*ai*ai+6*ai*ai*ai*ai)/720;
   wgh[0] = -1.0/60*ai + 1.0/180*ai*ai + 1.0/48*ai*ai*ai + 23.0/144*ai*ai*ai*ai
      - (17.0*ai + 223.0)*ai*ai*ai*ai*ai/720 - pol;
   wgh[1] = 3.0/20*ai -3.0/40*ai*ai -1.0/6*ai*ai*ai - 13.0/12*ai*ai*ai*ai +
      97.0/45*ai*ai*ai*ai*ai + 1.0/6*ai*ai*ai*ai*ai*ai + 7*pol;
   wgh[2] = -0.75*ai +0.75*ai*ai+(13.0+155*ai)*ai*ai*ai/48 -103.0/16*ai*ai*ai*ai*ai
      - 121.0/240*ai*ai*ai*ai*ai*ai - 21*pol;
   wgh[3] = 1 - 49.0/36*ai*ai - 49.0/9*ai*ai*ai*ai+385.0/36*ai*ai*ai*ai*ai +
      61.0/72*ai*ai*ai*ai*ai*ai + 35*pol;
   wgh[4] = 0.75*ai + 0.75*ai*ai - 13.0/48*ai*ai*ai + 89.0/16*ai*ai*ai*ai -
      1537.0/144*ai*ai*ai*ai*ai - 41.0/48*ai*ai*ai*ai*ai*ai - 35*pol;
   wgh[5] = -3.0/20*ai - 3.0/40*ai*ai + 1.0/6*ai*ai*ai - 41.0/12*ai*ai*ai*ai
      + 6.4*ai*ai*ai*ai*ai + 31.0/60*ai*ai*ai*ai*ai*ai + 21*pol;
   wgh[6] = 1.0/60*ai + 1.0/180*ai*ai - 1.0/48*ai*ai*ai + 167.0/144*ai*ai*ai*ai -
      1537.0/720*ai*ai*ai*ai*ai- 25.0/144*ai*ai*ai*ai*ai*ai - 7*pol;
   wgh[7] = -1.0/6*ai*ai*ai*ai + 11.0/36*ai*ai*ai*ai*ai + 1.0/40*ai*ai*ai*ai*ai*ai + pol;
}
//-----------------------------------------------------------------------
void EW::grid_mapping( float_sw4 q, float_sw4 r, float_sw4 s, float_sw4& x,
		       float_sw4& y, float_sw4& z )
{
   // Map curvilinear grid parameters (q,r,s) on the top (curvilinear)
   // grid to Cartesian coordinates (x,y,z). x and y follow directly from
   // the grid spacing; z blends between the topography surface and the
   // uniform grid below the break level m_zetaBreak. If the topography at
   // (q,r) cannot be evaluated on this processor, z is set to -1e38.
   int g=mNumberOfGrids-1;
   float_sw4 h=mGridSize[g];
   x = (q-1)*h;
   y = (r-1)*h;
   float_sw4 ztopo;
   if( interpolate_topography(q,r,ztopo,true) )
   {
      int nz = m_global_nz[g];
      float_sw4 izb = 1.0/(m_zetaBreak*(nz-1));
      // sa in [0,1] is the normalized distance into the stretched region;
      // omsm = (1-sa)^m_grid_interpolation_order is the blending factor.
      float_sw4 sa  = (s-1)*izb;
      float_sw4 omsm = (1-sa);
      for( int l=2 ; l <= m_grid_interpolation_order ; l++ )
	 omsm *= (1-sa);
      if( sa >= 1 )
	 // Below the break level the grid is uniform.
	 z = m_topo_zmax - (nz-s)*h;
      else
	 z = m_topo_zmax - (nz-s)*h - omsm*(m_topo_zmax-(nz-1)*h-ztopo);
   }
   else
      z = -1e38;
   //      double zloc = z;
   //      MPI_Allreduce( &zloc, &z, 1, MPI_DOUBLE, MPI_MAX, m_cartesian_communicator );
}
//-----------------------------------------------------------------------
bool EW::invert_grid_mapping( int g, float_sw4 x, float_sw4 y, float_sw4 z,
			      float_sw4& q, float_sw4& r, float_sw4& s )
{
   // Translates (x,y,z) to grid indices on grid g.
   // Successful only if (x,y) is in my processor, will return false if
   // the point is outside the processor.
   //
   // On a Cartesian grid the inverse is a direct formula; on the
   // curvilinear grid the vertical index s is found by inverting the
   // topography-stretched mapping (see grid_mapping), using Newton
   // iteration inside the stretched region.
   bool success=true;
   q = x/mGridSize[g]+1;
   r = y/mGridSize[g]+1;
   if( g < mNumberOfCartesianGrids )
      s = (z-m_zmin[g])/mGridSize[g]+1;
   else
   {
      // Grid is curvilinear
      // Maximum number of iterations, and error tolerance
      // for Newton iterations
      int maxit = 10;
      float_sw4 tol = 1e-9;
      float_sw4 zTopo;
      if( interpolate_topography(q, r, zTopo, true ) )
      {
	 int nz = m_global_nz[g];
	 float_sw4 h = mGridSize[g];
	 float_sw4 izb = 1.0/m_zetaBreak;
	 // Elastic region top grid, sun is s normalized to 0 < sun < 1
	 float_sw4 sun = 1-(m_topo_zmax-z)/((nz-1)*h);
	 if( sun >= m_zetaBreak )
	    // In uniform part of grid
	    s = (nz-1)*sun+1;
	 else
	 {
	    // Non-uniform, solve for s by Newton iteration
	    int it = 0;
	    float_sw4 numerr=tol+1;
	    while( numerr > tol && it < maxit )
	    {
	       // f(sun) = mapped z minus target z; fp is its derivative.
	       float_sw4 omsm = (1-izb*sun);
	       for( int l=2 ; l <= m_grid_interpolation_order-1 ; l++ )
		  omsm *= (1-izb*sun);
	       float_sw4 fp = h*(nz-1) + izb*m_grid_interpolation_order*omsm*(m_topo_zmax - (nz-1)*h - zTopo);
	       omsm *= (1-izb*sun);
	       float_sw4 f = m_topo_zmax - (nz-1)*h*(1-sun) - omsm*(m_topo_zmax-(nz-1)*h-zTopo)-z;
	       float_sw4 ds= f/fp;
	       numerr = fabs(ds);
	       sun = sun - ds;
	       it++;
	    }
	    s = (nz-1)*sun+1;
	    if( numerr >= tol )
	    {
	       // Newton did not converge within maxit iterations.
	       cout << "EW::invert_grid_mapping: WARNING no convergence err=" << numerr << " tol = " << tol << endl;
	       s = -1e38;
	       success = false;
	    }
	 }
      }
      else
      {
	 // point not in processor, could not evaluate topography
	 s = -1e38;
	 success = false;
      }
   }
   return success;
}
//-----------------------------------------------------------------------
void EW::computeGeographicCoord(float_sw4 x, float_sw4 y, float_sw4 & longitude, float_sw4 & latitude)
{
   // Convert Cartesian grid coordinates (x,y) to (longitude, latitude)
   // using the origin (mLonOrigin, mLatOrigin) and the grid azimuth
   // mGeoAz (degrees). Longitude scaling uses either a fixed
   // meters-per-degree-of-longitude or one depending on latitude.
   const float_sw4 deg2rad = M_PI/180.0;
   const float_sw4 phi = mGeoAz * deg2rad;
   const float_sw4 north = x*cos(phi) - y*sin(phi);
   const float_sw4 east  = x*sin(phi) + y*cos(phi);
   latitude = mLatOrigin + north/mMetersPerDegree;
   if (mConstMetersPerLongitude)
      longitude = mLonOrigin + east/(mMetersPerLongitude);
   else
      longitude = mLonOrigin + east/(mMetersPerDegree*cos(latitude*deg2rad));
}
//-----------------------------------------------------------------------
void EW::computeCartesianCoord(float_sw4 &x, float_sw4 &y, float_sw4 lon, float_sw4 lat)
{
   // -----------------------------------------------------------------
   // Compute the cartesian coordinate given the geographic coordinate
   // -----------------------------------------------------------------
   // Inverse of computeGeographicCoord: rotate the (lat,lon) offsets from
   // the origin by the grid azimuth mGeoAz. Longitude scaling is either a
   // fixed meters-per-degree-of-longitude or latitude dependent.
   const float_sw4 deg2rad = M_PI/180.0;
   const float_sw4 phi = mGeoAz * deg2rad;
   const float_sw4 dlat = lat - mLatOrigin;
   const float_sw4 dlon = lon - mLonOrigin;
   if (mConstMetersPerLongitude)
   {
      x = mMetersPerDegree*cos(phi)*dlat + mMetersPerLongitude*dlon*sin(phi);
      y = mMetersPerDegree*(-sin(phi))*dlat + mMetersPerLongitude*dlon*cos(phi);
   }
   else
   {
      x = mMetersPerDegree*(cos(phi)*dlat + cos(lat*deg2rad)*dlon*sin(phi));
      y = mMetersPerDegree*(-sin(phi)*dlat + cos(lat*deg2rad)*dlon*cos(phi));
   }
}
//-----------------------------------------------------------------------
void EW::get_utc( int utc[7] ) const
{
   // Copy the stored UTC reference time (7 integer fields) into utc.
   for( int field = 0 ; field < 7 ; field++ )
      utc[field] = m_utc0[field];
}
//-----------------------------------------------------------------------
void EW::extractRecordData(TimeSeries::receiverMode mode, int i0, int j0, int k0, int g0,
			   vector<float_sw4> &uRec, vector<Sarray> &Um2, vector<Sarray> &U)
{
   // Extract the requested receiver quantity at grid point (i0,j0,k0) on
   // grid g0 into uRec. U is the current solution, Um2 the solution two
   // time steps back (used for the centered-in-time velocity). Spatial
   // derivatives use centered differences; on the curvilinear grid the
   // chain rule with the metric terms mMetric and Jacobian mJ is applied.
   // uRec is resized to 3 (Displacement, Velocity, Curl), 1 (Div),
   // 6 (Strains) or 9 (DisplacementGradient).
   if (mode == TimeSeries::Displacement)
   {
      uRec.resize(3);
      uRec[0] = U[g0](1, i0, j0, k0);
      uRec[1] = U[g0](2, i0, j0, k0);
      uRec[2] = U[g0](3, i0, j0, k0);
   }
   else if (mode == TimeSeries::Velocity)
   {
      // Centered difference in time over two steps: (U(t) - U(t-2dt))/(2dt).
      uRec.resize(3);
      uRec[0] = (U[g0](1, i0, j0, k0) - Um2[g0](1, i0, j0, k0))/(2*mDt);
      uRec[1] = (U[g0](2, i0, j0, k0) - Um2[g0](2, i0, j0, k0))/(2*mDt);
      uRec[2] = (U[g0](3, i0, j0, k0) - Um2[g0](3, i0, j0, k0))/(2*mDt);
   }
   else if(mode == TimeSeries::Div)
   {
      uRec.resize(1);
      if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid
      {
//	int i=m_i0, j=m_j0, k=m_k0, g=m_grid0;
	float_sw4 factor = 1.0/(2*mGridSize[g0]);
	uRec[0] = ((U[g0](1,i0+1, j0, k0) - U[g0](1,i0-1, j0, k0)+
		    U[g0](2,i0, j0+1, k0) - U[g0](2,i0, j0-1, k0)+
		    U[g0](3,i0, j0, k0+1) - U[g0](3,i0, j0, k0-1))*factor);
      }
      else // must be curvilinear
      {
//	int i=m_i0, j=m_j0, k=m_k0, g=m_grid0;
	float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0));
	uRec[0] = ( ( mMetric(1,i0,j0,k0)*(U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))+
		      mMetric(1,i0,j0,k0)*(U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))+
		      mMetric(2,i0,j0,k0)*(U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))+
		      mMetric(3,i0,j0,k0)*(U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))+
		      mMetric(4,i0,j0,k0)*(U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)) )*factor);
      }
   } // end div
   else if(mode == TimeSeries::Curl)
   {
      uRec.resize(3);
      if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid
      {
//	int i=m_i0, j=m_j0, k=m_k0, g=m_grid0;
	float_sw4 factor = 1.0/(2*mGridSize[g0]);
	float_sw4 duydx = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0))*factor;
	float_sw4 duzdx = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0))*factor;
	float_sw4 duxdy = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0))*factor;
	float_sw4 duzdy = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0))*factor;
	float_sw4 duxdz = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))*factor;
	float_sw4 duydz = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))*factor;
// 	if( m_xycomponent )
// 	{
	   uRec[0] = ( duzdy-duydz );
	   uRec[1] = ( duxdz-duzdx );
	   uRec[2] = ( duydx-duxdy );
// 	}
// 	else
// 	{
// 	   float_sw4 uns = m_thynrm*(duzdy-duydz)-m_thxnrm*(duxdz-duzdx);
// 	   float_sw4 uew = m_salpha*(duzdy-duydz)+m_calpha*(duxdz-duzdx);
// 	   mRecordedUX.push_back( uew );
// 	   mRecordedUY.push_back( uns );
// 	   mRecordedUZ.push_back( -(duydx-duxdy) );
// 	}
      }
      else // must be curvilinear
      {
//	 int i=m_i0, j=m_j0, k=m_k0, g=m_grid0;
	 float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0));
	 // Parameter-space differences (q,r,s directions), transformed to
	 // physical derivatives via the metric below.
	 float_sw4 duxdq = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0));
	 float_sw4 duydq = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0));
	 float_sw4 duzdq = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0));
	 float_sw4 duxdr = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0));
	 float_sw4 duydr = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0));
	 float_sw4 duzdr = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0));
	 float_sw4 duxds = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1));
	 float_sw4 duyds = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1));
	 float_sw4 duzds = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1));
	 float_sw4 duzdy = mMetric(1,i0,j0,k0)*duzdr+mMetric(3,i0,j0,k0)*duzds;
	 float_sw4 duydz = mMetric(4,i0,j0,k0)*duyds;
	 float_sw4 duxdz = mMetric(4,i0,j0,k0)*duxds;
	 float_sw4 duzdx = mMetric(1,i0,j0,k0)*duzdq+mMetric(2,i0,j0,k0)*duzds;
	 float_sw4 duydx = mMetric(1,i0,j0,k0)*duydq+mMetric(2,i0,j0,k0)*duyds;
	 float_sw4 duxdy = mMetric(1,i0,j0,k0)*duxdr+mMetric(3,i0,j0,k0)*duxds;
// 	 if( m_xycomponent )
// 	 {
	    uRec[0] = (duzdy-duydz)*factor;
	    uRec[1] = (duxdz-duzdx)*factor;
	    uRec[2] = (duydx-duxdy)*factor;
// 	 }
// 	 else
// 	 {
// 	    float_sw4 uns = m_thynrm*(duzdy-duydz)-m_thxnrm*(duxdz-duzdx);
// 	    float_sw4 uew = m_salpha*(duzdy-duydz)+m_calpha*(duxdz-duzdx);
// 	    mRecordedUX.push_back( uew*factor );
// 	    mRecordedUY.push_back( uns*factor );
// 	    mRecordedUZ.push_back( -(duydx-duxdy)*factor );
// 	 }
      }
   } // end Curl
   else if(mode == TimeSeries::Strains )
   {
      // Symmetric strain tensor: (exx, eyy, ezz, exy, exz, eyz).
      uRec.resize(6);
      if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid
      {
//	int i=m_i0, j=m_j0, k=m_k0, g=m_grid0;
	float_sw4 factor = 1.0/(2*mGridSize[g0]);
	float_sw4 duydx = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0))*factor;
	float_sw4 duzdx = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0))*factor;
	float_sw4 duxdy = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0))*factor;
	float_sw4 duzdy = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0))*factor;
	float_sw4 duxdz = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))*factor;
	float_sw4 duydz = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))*factor;
	float_sw4 duxdx = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))*factor;
	float_sw4 duydy = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))*factor;
	float_sw4 duzdz = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1))*factor;
	uRec[0] = ( duxdx );
	uRec[1] = ( duydy );
	uRec[2] = ( duzdz );
	uRec[3] = ( 0.5*(duydx+duxdy) );
	uRec[4] = ( 0.5*(duzdx+duxdz) );
	uRec[5] = ( 0.5*(duydz+duzdy) );
      }
      else // must be curvilinear
      {
//	 int i=m_i0, j=m_j0, k0=m_k00, g0=m_grid0;
	 float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0));
	 float_sw4 duxdq = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0));
	 float_sw4 duydq = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0));
	 float_sw4 duzdq = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0));
	 float_sw4 duxdr = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0));
	 float_sw4 duydr = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0));
	 float_sw4 duzdr = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0));
	 float_sw4 duxds = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1));
	 float_sw4 duyds = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1));
	 float_sw4 duzds = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1));
	 float_sw4 duzdy = (mMetric(1,i0,j0,k0)*duzdr+mMetric(3,i0,j0,k0)*duzds)*factor;
	 float_sw4 duydz = (mMetric(4,i0,j0,k0)*duyds)*factor;
	 float_sw4 duxdz = (mMetric(4,i0,j0,k0)*duxds)*factor;
	 float_sw4 duzdx = (mMetric(1,i0,j0,k0)*duzdq+mMetric(2,i0,j0,k0)*duzds)*factor;
	 float_sw4 duydx = (mMetric(1,i0,j0,k0)*duydq+mMetric(2,i0,j0,k0)*duyds)*factor;
	 float_sw4 duxdy = (mMetric(1,i0,j0,k0)*duxdr+mMetric(3,i0,j0,k0)*duxds)*factor;
	 float_sw4 duxdx = ( mMetric(1,i0,j0,k0)*(U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))+
			     mMetric(2,i0,j0,k0)*(U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1)) )*factor;
	 float_sw4 duydy = ( mMetric(1,i0,j0,k0)*(U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))+
			     mMetric(3,i0,j0,k0)*(U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1)) )*factor;
	 float_sw4 duzdz = ( mMetric(4,i0,j0,k0)*(U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)) )*factor;
	 uRec[0] = ( duxdx );
	 uRec[1] = ( duydy );
	 uRec[2] = ( duzdz );
	 uRec[3] = ( 0.5*(duydx+duxdy) );
	 uRec[4] = ( 0.5*(duzdx+duxdz) );
	 uRec[5] = ( 0.5*(duydz+duzdy) );
      }
   } // end Strains
   else if(mode == TimeSeries::DisplacementGradient )
   {
      // Full gradient tensor, row-major: (dux/dx..dux/dz, duy/dx..duz/dz).
      uRec.resize(9);
      if (g0 < mNumberOfCartesianGrids) // must be a Cartesian grid
      {
//	int i=m_i0, j=m_j0, k=m_k0, g=m_grid0;
	float_sw4 factor = 1.0/(2*mGridSize[g0]);
	float_sw4 duydx = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0))*factor;
	float_sw4 duzdx = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0))*factor;
	float_sw4 duxdy = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0))*factor;
	float_sw4 duzdy = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0))*factor;
	float_sw4 duxdz = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1))*factor;
	float_sw4 duydz = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1))*factor;
	float_sw4 duxdx = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))*factor;
	float_sw4 duydy = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))*factor;
	float_sw4 duzdz = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1))*factor;
	uRec[0] = duxdx;
	uRec[1] = duxdy;
	uRec[2] = duxdz;
	uRec[3] = duydx;
	uRec[4] = duydy;
	uRec[5] = duydz;
	uRec[6] = duzdx;
	uRec[7] = duzdy;
	uRec[8] = duzdz;
      }
      else // must be curvilinear
      {
//	 int i=m_i0, j=m_j0, k0=m_k00, g0=m_grid0;
	 float_sw4 factor = 0.5/sqrt(mJ(i0,j0,k0));
	 float_sw4 duxdq = (U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0));
	 float_sw4 duydq = (U[g0](2,i0+1,j0,k0) - U[g0](2,i0-1,j0,k0));
	 float_sw4 duzdq = (U[g0](3,i0+1,j0,k0) - U[g0](3,i0-1,j0,k0));
	 float_sw4 duxdr = (U[g0](1,i0,j0+1,k0) - U[g0](1,i0,j0-1,k0));
	 float_sw4 duydr = (U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0));
	 float_sw4 duzdr = (U[g0](3,i0,j0+1,k0) - U[g0](3,i0,j0-1,k0));
	 float_sw4 duxds = (U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1));
	 float_sw4 duyds = (U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1));
	 float_sw4 duzds = (U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1));
	 float_sw4 duzdy = (mMetric(1,i0,j0,k0)*duzdr+mMetric(3,i0,j0,k0)*duzds)*factor;
	 float_sw4 duydz = (mMetric(4,i0,j0,k0)*duyds)*factor;
	 float_sw4 duxdz = (mMetric(4,i0,j0,k0)*duxds)*factor;
	 float_sw4 duzdx = (mMetric(1,i0,j0,k0)*duzdq+mMetric(2,i0,j0,k0)*duzds)*factor;
	 float_sw4 duydx = (mMetric(1,i0,j0,k0)*duydq+mMetric(2,i0,j0,k0)*duyds)*factor;
	 float_sw4 duxdy = (mMetric(1,i0,j0,k0)*duxdr+mMetric(3,i0,j0,k0)*duxds)*factor;
	 float_sw4 duxdx = ( mMetric(1,i0,j0,k0)*(U[g0](1,i0+1,j0,k0) - U[g0](1,i0-1,j0,k0))+
			     mMetric(2,i0,j0,k0)*(U[g0](1,i0,j0,k0+1) - U[g0](1,i0,j0,k0-1)) )*factor;
	 float_sw4 duydy = ( mMetric(1,i0,j0,k0)*(U[g0](2,i0,j0+1,k0) - U[g0](2,i0,j0-1,k0))+
			     mMetric(3,i0,j0,k0)*(U[g0](2,i0,j0,k0+1) - U[g0](2,i0,j0,k0-1)) )*factor;
	 float_sw4 duzdz = ( mMetric(4,i0,j0,k0)*(U[g0](3,i0,j0,k0+1) - U[g0](3,i0,j0,k0-1)) )*factor;
	 uRec[0] = duxdx;
	 uRec[1] = duxdy;
	 uRec[2] = duxdz;
	 uRec[3] = duydx;
	 uRec[4] = duydy;
	 uRec[5] = duydz;
	 uRec[6] = duzdx;
	 uRec[7] = duzdy;
	 uRec[8] = duzdz;
      }
   } // end DisplacementGradient
   return;
}
//-----------------------------------------------------------------------
void EW::default_bcs( )
{
   // Every face starts out as a super-grid (absorbing) boundary ...
   int side = 0;
   while( side < 6 )
      mbcGlobalType[side++] = bSuperGrid;
   // ... except the low-z face, which is normally the free surface.
   mbcGlobalType[4] = bStressFree;
}
//-----------------------------------------------------------------------
// Define an analytical Gaussian-hill topography of amplitude `amp`,
// half-widths (Lx,Ly) and center (x0,y0), and evaluate it on both the
// finest-grid topography array (mTopo) and the extended topography grid
// (mTopoGridExt). Sets m_analytical_topo so later stages may use the
// exact (analytical) metric for this surface.
void EW::buildGaussianHillTopography(float_sw4 amp, float_sw4 Lx, float_sw4 Ly, float_sw4 x0, float_sw4 y0)
{
   if (mVerbose >= 1 && (m_myrank == 0 ) )
      cout << "***inside buildGaussianHillTopography***"<< endl;
// Fully parenthesized: the previous form `(x)*(x)` relied on operator
// precedence to give the intended value inside larger expressions.
#define SQR(x) ((x)*(x))
   int topLevel = mNumberOfGrids-1;
   float_sw4 x, y;
   // remember the hill parameters for later (analytical metric, etc.)
   m_analytical_topo = true;
   m_GaussianAmp = amp;
   m_GaussianLx  = Lx;
   m_GaussianLy  = Ly;
   m_GaussianXc  = x0;
   m_GaussianYc  = y0;
   // fill the topography array on the finest grid
   for (int i = m_iStart[topLevel]; i <= m_iEnd[topLevel]; ++i)
      for (int j = m_jStart[topLevel]; j <= m_jEnd[topLevel]; ++j)
      {
	 x = (i-1)*mGridSize[topLevel];
	 y = (j-1)*mGridSize[topLevel];
	 // positive topography is up (negative z)
	 mTopo(i,j,1) = m_GaussianAmp*exp(-SQR((x-m_GaussianXc)/m_GaussianLx)
					  -SQR((y-m_GaussianYc)/m_GaussianLy));
      }
   // same surface on the extended topography grid (covers extra points)
   for (int i = mTopoGridExt.m_ib ; i <= mTopoGridExt.m_ie ; ++i)
      for (int j = mTopoGridExt.m_jb ; j <= mTopoGridExt.m_je; ++j)
      {
	 x = (i-1)*mGridSize[topLevel];
	 y = (j-1)*mGridSize[topLevel];
	 // positive topography is up (negative z)
	 mTopoGridExt(i,j,1) = m_GaussianAmp*exp(-SQR((x-m_GaussianXc)/m_GaussianLx)
						 -SQR((y-m_GaussianYc)/m_GaussianLy));
      }
#undef SQR
}
//-----------------------------------------------------------------------
// Compute the global z-range [topo_zmin, topo_zmax] spanned by the
// topography over all MPI ranks. The z-axis points downward, so a point
// above sea level has a negative z-value.
void EW::compute_minmax_topography( float_sw4& topo_zmin, float_sw4& topo_zmax )
{
   // Without topography the free surface is flat at z = 0.
   if( !m_topography_exists )
   {
      topo_zmin = topo_zmax = 0;
      return;
   }
   int g = mNumberOfGrids-1;
   // seed the local extremes from one sample point, then sweep the
   // whole extended topography grid
   float_sw4 zlo = -mTopoGridExt(m_iStart[g], m_jEnd[g], 1);
   float_sw4 zhi = zlo;
   const int ib = mTopoGridExt.m_ib, ie = mTopoGridExt.m_ie;
   const int jb = mTopoGridExt.m_jb, je = mTopoGridExt.m_je;
   for( int i = ib ; i <= ie ; i++ )
      for( int j = jb ; j <= je ; j++ )
      {
	 float_sw4 z = -mTopoGridExt(i,j,1);
	 if( z > zhi )
	    zhi = z;
	 if( z < zlo )
	    zlo = z;
      }
   // reduce the per-rank extremes to the global ones
   MPI_Allreduce( &zlo, &topo_zmin, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator);
   MPI_Allreduce( &zhi, &topo_zmax, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator);
}
//-----------------------------------------------------------------------
void EW::generate_grid()
{
// Build the curvilinear grid (mX, mY, mZ) on the topmost grid so that it
// follows the topography on top and meets the Cartesian grids below.
// Generate grid on domain: topography <= z <= zmax,
// The 2D grid on z=zmax, is given by ifirst <= i <= ilast, jfirst <= j <= jlast
// spacing h.
   if (!m_topography_exists ) return;
// m_grid_interpolation_order = a_order;
   if (mVerbose >= 1 && (m_myrank==0) )
      cout << "***inside generate_grid***"<< endl;
   // get the size from the top Cartesian grid
   int g = mNumberOfCartesianGrids-1;
   int ifirst = m_iStart[g];
   int ilast  = m_iEnd[g];
   int jfirst = m_jStart[g];
   int jlast  = m_jEnd[g];
   float_sw4 h = mGridSize[g]; // grid size must agree with top cartesian grid
   float_sw4 zMaxCart = m_zmin[g]; // bottom z-level for curvilinear grid
   int i, j;
   int gTop = mNumberOfGrids-1;
   // index of the lowest non-ghost k-plane of the curvilinear grid
   int Nz = m_kEnd[gTop] - m_ghost_points;
   if(mVerbose > 4 && (m_myrank == 0 ) )
   {
      printf("generate_grid: Number of grid points in curvilinear grid = %i, kStart = %i, kEnd = %i\n",
	     Nz, m_kStart[gTop], m_kEnd[gTop]);
   }
   // generate the grid by calling the curvilinear mapping function
   float_sw4 X0, Y0, Z0;
   int k;
   for (k=m_kStart[gTop]; k<=m_kEnd[gTop]; k++)
      for (j=m_jStart[gTop]; j<=m_jEnd[gTop]; j++)
	 for (i=m_iStart[gTop]; i<=m_iEnd[gTop]; i++)
	 {
	    // map index coordinates (i,j,k) to physical (X0,Y0,Z0)
	    grid_mapping((float_sw4) i, (float_sw4) j, (float_sw4) k, X0, Y0, Z0);
	    mX(i,j,k) = X0;
	    mY(i,j,k) = Y0;
	    mZ(i,j,k) = Z0;
	 }
   // exchange ghost-point values of mZ between neighboring ranks
   communicate_array( mZ, gTop );
   // calculate min and max((mZ(i,j,k)-mZ(i,j,k-1))/h) for k=Nz
   // (local grid-spacing ratio at the bottom of the curvilinear grid;
   //  a value near 1 means a smooth transition to the Cartesian grid)
   k = Nz;
   float_sw4 hRatio;
   float_sw4 mZmin = 1.0e9, mZmax=0;
   for (j=m_jStart[gTop]; j<=m_jEnd[gTop]; j++)
      for (i=m_iStart[gTop]; i<=m_iEnd[gTop]; i++)
      {
	 hRatio = (mZ(i,j,k)-mZ(i,j,k-1))/mGridSize[gTop];
	 if (hRatio < mZmin) mZmin = hRatio;
	 if (hRatio > mZmax) mZmax = hRatio;
      }
   // global extremes of the ratio, reported on rank 0 when verbose
   float_sw4 zMinGlobal, zMaxGlobal;
   MPI_Allreduce( &mZmin, &zMinGlobal, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator);
   MPI_Allreduce( &mZmax, &zMaxGlobal, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator);
   if(mVerbose > 3 && (m_myrank == 0) )
   {
      printf("Curvilinear/Cartesian interface (k=Nz-1): Min grid size ratio - 1 = %e, max ratio z - 1 = %e, top grid # = %i\n",
	     zMinGlobal-1., zMaxGlobal-1., gTop);
   }
}
//---------------------------------------------------------
void EW::setup_metric()
{
   // Compute the metric coefficients (mMetric) and the Jacobian (mJ) of
   // the curvilinear mapping on the top grid, then exchange ghost points
   // and report the global Jacobian range.
   if (!m_topography_exists ) return;
   if (mVerbose >= 1 && (m_myrank == 0))
      cout << "***inside setup_metric***"<< endl;
   int g=mNumberOfGrids-1;
   // NOTE(review): Bx..Nz are not used below in this function — candidates
   // for removal (kept here since this is a documentation-only pass).
   int Bx=m_iStart[g];
   int By=m_jStart[g];
   int Bz=m_kStart[g];
   int Nx=m_iEnd[g];
   int Ny=m_jEnd[g];
   int Nz=m_kEnd[g];
   if( m_analytical_topo && m_use_analytical_metric )
   {
      // Gaussian hill topography, analytical expressions for metric derivatives.
      int nxg = m_global_nx[g];
      int nyg = m_global_ny[g];
      int nzg = m_global_nz[g];
      float_sw4 h= mGridSize[g];
      // zmax: bottom z-level of the curvilinear grid, offset by the
      // portion controlled by m_zetaBreak
      float_sw4 zmax = m_zmin[g-1] - (nzg-1)*h*(1-m_zetaBreak);
      // m_corder selects between the two index-order variants of the
      // kernel (presumably array storage order — confirm in *_rev source)
      if( m_corder )
	 metricexgh_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			 m_global_nz[g], mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr(),
			 m_grid_interpolation_order, m_zetaBreak, zmax, m_GaussianAmp, m_GaussianXc,
			 m_GaussianYc, m_GaussianLx, m_GaussianLy );
      else
	 metricexgh( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
		     m_global_nz[g], mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr(),
		     m_grid_interpolation_order, m_zetaBreak, zmax, m_GaussianAmp, m_GaussianXc,
		     m_GaussianYc, m_GaussianLx, m_GaussianLy );
   }
   else
   {
      // Numerical metric from the grid coordinates.
      int ierr=0;
      if( m_corder )
	 ierr = metric_rev( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			    mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr() );
      else
	 ierr = metric( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
			mX.c_ptr(), mY.c_ptr(), mZ.c_ptr(), mMetric.c_ptr(), mJ.c_ptr() );
      CHECK_INPUT(ierr==0, "Problems calculating the metric coefficients");
   }
   // exchange ghost points of the metric arrays between ranks
   communicate_array( mMetric, mNumberOfGrids-1 );
   communicate_array( mJ, mNumberOfGrids-1 );
// if( m_analytical_topo && !m_use_analytical_metric && mVerbose > 3 )
//    // Test metric derivatives if available
//    metric_derivatives_test( );
   // global min/max of the Jacobian — a non-positive minJ would indicate
   // an invalid (folded) grid
   float_sw4 minJ, maxJ;
   gridinfo( m_iStart[g], m_iEnd[g], m_jStart[g], m_jEnd[g], m_kStart[g], m_kEnd[g],
	     mMetric.c_ptr(), mJ.c_ptr(), minJ, maxJ );
   float_sw4 minJglobal, maxJglobal;
   MPI_Allreduce( &minJ, &minJglobal, 1, m_mpifloat, MPI_MIN, m_cartesian_communicator);
   MPI_Allreduce( &maxJ, &maxJglobal, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator);
   if (mVerbose>3 && (m_myrank == 0))
      printf("*** Jacobian of metric: minJ = %e maxJ = %e\n", minJglobal, maxJglobal);
}
//-----------------------------------------------------------------------
// Evaluate the topography z-coordinate at physical point (X,Y) on the
// local rank. Returns true on success; on interpolation failure, Ztopo is
// set to zero and false is returned.
bool EW::find_topo_zcoord_owner( float_sw4 X, float_sw4 Y, float_sw4& Ztopo )
{
   if( !m_topography_exists )
   {
      // no topography: the surface is flat at z = 0
      Ztopo = 0;
      return true;
   }
   // convert (X,Y) to 1-based grid index coordinates on the finest grid
   float_sw4 h = mGridSize[mNumberOfGrids-1];
   float_sw4 q = X/h + 1.0;
   float_sw4 r = Y/h + 1.0;
   if( interpolate_topography(q, r, Ztopo, true) )
      return true;
   // interpolation failed (point outside this rank's data)
   cerr << "Unable to evaluate topography at" << " X= " << X << " Y= " << Y << endl;
   cerr << "Setting topography to ZERO" << endl;
   Ztopo = 0;
   return false;
}
//-----------------------------------------------------------------------
// Collective version of find_topo_zcoord_owner: every rank calls this and
// every rank receives the topography z-coordinate at (X,Y), regardless of
// which rank owns that point. Returns false if no rank could evaluate it.
bool EW::find_topo_zcoord_all( float_sw4 X, float_sw4 Y, float_sw4& Ztopo )
{
   if( !m_topography_exists )
   {
      // no topography: flat surface at z = 0
      Ztopo = 0;
      return true;
   }
   // convert (X,Y) to 1-based grid index coordinates on the finest grid
   float_sw4 h = mGridSize[mNumberOfGrids-1];
   float_sw4 q = X/h + 1.0;
   float_sw4 r = Y/h + 1.0;
   // ranks that cannot evaluate the point contribute a sentinel that
   // loses the MAX reduction below
   float_sw4 Ztopoloc;
   if( !interpolate_topography(q, r, Ztopoloc, true) )
      Ztopoloc = -1e38;
   MPI_Allreduce( &Ztopoloc, &Ztopo, 1, m_mpifloat, MPI_MAX, m_cartesian_communicator );
   // success iff at least one rank produced a real value
   return Ztopo > -1e38;
}
//-----------------------------------------------------------------------
// Strict weak ordering on the precomputed spatial sort key; used as the
// comparator for std::sort over grid point sources.
bool less_than( GridPointSource* ptsrc1, GridPointSource* ptsrc2 )
{
   const size_t key1 = ptsrc1->m_key;
   const size_t key2 = ptsrc2->m_key;
   return key1 < key2;
}
//-----------------------------------------------------------------------
void EW::sort_grid_point_sources()
{
   // Assign each grid point source a single scalar key that linearizes
   // (grid, i, j, k), sort all sources by that key, and record in
   // m_identsources the boundaries between groups of sources that share
   // the same grid point.
   //
   // gptr[g] = number of grid points in all grids before grid g, so keys
   // from different grids never collide.
   size_t* gptr = new size_t[mNumberOfGrids];
   gptr[0] = 0;
   for(int g=0 ; g < mNumberOfGrids-1 ; g++ )
   {
      gptr[g+1] = gptr[g] + static_cast<size_t>((m_iEnd[g]-m_iStart[g]+1))*
	 (m_jEnd[g]-m_jStart[g]+1)*(m_kEnd[g]-m_kStart[g]+1);
   }
   // ni[g]  = extent in i; nij[g] = extent of one k-plane (i*j)
   size_t* ni = new size_t[mNumberOfGrids];
   size_t* nij = new size_t[mNumberOfGrids];
   for(int g=0 ; g < mNumberOfGrids ; g++ )
   {
      ni[g] = (m_iEnd[g]-m_iStart[g]+1);
      nij[g] = ni[g]*(m_jEnd[g]-m_jStart[g]+1);
   }
   // key = base offset of the grid + linear index of (i0,j0,k0) within it
   for( int s=0 ; s < m_point_sources.size() ; s++ )
   {
      int g = m_point_sources[s]->m_grid;
      size_t key = gptr[g] + (m_point_sources[s]->m_i0-m_iStart[g]) +
	 ni[g]*(m_point_sources[s]->m_j0-m_jStart[g]) +
	 nij[g]*(m_point_sources[s]->m_k0-m_kStart[g]);
      m_point_sources[s]->set_sort_key(key);
   }
   delete[] gptr;
   delete[] ni;
   delete[] nij;
   std::sort(m_point_sources.begin(), m_point_sources.end(), less_than );
   // set up array detecting sources belonging to idential points:
   // after this loop, sources [m_identsources[k], m_identsources[k+1])
   // all share the same key, i.e. the same grid point.
   m_identsources.resize(1);
   m_identsources[0] = 0;
   int k = 0;
   while( m_identsources[k] < m_point_sources.size() )
   {
      int m = m_identsources[k];
      size_t key = m_point_sources[m]->m_key;
      // advance m to the last source with this key
      while( m+1 < m_point_sources.size() && m_point_sources[m+1]->m_key == key )
	 m++;
      m_identsources.push_back(m+1);
      k++;
   }
   // Test: report the total and unique source counts on rank 0.
   int nrsrc =m_point_sources.size();
   int nrunique = m_identsources.size()-1;
   int nrsrctot, nruniquetot;
   MPI_Reduce( &nrsrc, &nrsrctot, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
   MPI_Reduce( &nrunique, &nruniquetot, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD );
   if( m_myrank == 0 )
   {
      cout << "number of grid point sources = " << nrsrctot << endl;
      cout << "number of unique g.p. sources = " << nruniquetot << endl;
   }
}
//-----------------------------------------------------------------------
void EW::copy_point_sources_to_gpu()
{
   // new code, redefined dev_point_sources to be a GridPointSource* to
   // be able to copy the sources to device as an array instead of copying
   // them one by one.
   //
   // NOTE(review): on HIP errors this function only prints a message and
   // continues (best-effort); subsequent device use of the arrays would
   // then fail — consider whether an early return is wanted here.
   hipError_t retcode=hipMalloc( (void**)&dev_point_sources, sizeof(GridPointSource)*m_point_sources.size());
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMalloc, 1, retcode = " <<
	 hipGetErrorString(retcode) << endl;
   // Stage host-side copies of all sources in one contiguous array so a
   // single hipMemcpy suffices.
   GridPointSource* hsources = new GridPointSource[m_point_sources.size()];
   for( int s=0 ; s < m_point_sources.size() ; s++ )
      hsources[s] = *(m_point_sources[s]);
   retcode = hipMemcpy( dev_point_sources, hsources,
			m_point_sources.size()*sizeof(GridPointSource),
			hipMemcpyHostToDevice );
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMemcpy, 1, retcode = " <<
	 hipGetErrorString(retcode) << endl;
   // Also copy the identical-point index array (see sort_grid_point_sources).
   retcode = hipMalloc( (void**)&dev_identsources, sizeof(int)*m_identsources.size() );
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMalloc, 2, retcode = " <<
	 hipGetErrorString(retcode) << endl;
   retcode = hipMemcpy( dev_identsources, &m_identsources[0], sizeof(int)*m_identsources.size(), hipMemcpyHostToDevice );
   if( hipSuccess != retcode )
      cout << "Error EW::copy_point_sources_to_gpu, hipMemcpy, 2, retcode = " <<
	 hipGetErrorString(retcode) << endl;
   delete[] hsources;
}
//-----------------------------------------------------------------------
// Terminate the program with a diagnostic if a HIP runtime call failed.
// commandName/fileName/line identify the failing call site (typically
// supplied by a wrapper macro).
void EW::CheckCudaCall(hipError_t command, const char * commandName, const char * fileName, int line)
{
   if (command == hipSuccess)
      return;
   fprintf(stderr, "Error: CUDA result \"%s\" for call \"%s\" in file \"%s\" at line %d. Terminating...\n",
	   hipGetErrorString(command), commandName, fileName, line);
   exit(1);
}
|
pi02.c | #include <omp.h>
#include <stdio.h>   /* printf was used without a prototype before */

static long num_steps = 100000;   /* number of integration intervals */
double step;                      /* interval width, set in main     */
#define NUM_THREADS 4

/*
 * Estimate pi by midpoint-rule integration of 4/(1+x^2) over [0,1].
 * Each OpenMP thread accumulates a partial sum in its own slot of
 * sum[]; the partial sums are combined serially afterwards.
 */
int main ()
{
   int i;
   double pi, sum[NUM_THREADS];

   step = 1.0/(double) num_steps;

   /* BUGFIX: initialize every slot before the parallel region. The old
    * code zeroed sum[id] only inside each thread, so if the runtime
    * delivered fewer than NUM_THREADS threads, the reduction below read
    * uninitialized memory (undefined behavior). */
   for (i = 0; i < NUM_THREADS; i++)
      sum[i] = 0.0;

   omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private(i)
   {
      double x;                         /* midpoint of current interval */
      int id = omp_get_thread_num();
      /* cyclic (round-robin) distribution of intervals over threads */
      for (i = id; i < num_steps; i = i + NUM_THREADS) {
	 x = (i + 0.5)*step;
	 sum[id] += 4.0/(1.0 + x*x);
      }
   }
   /* serial reduction of the per-thread partial sums */
   for (i = 0, pi = 0.0; i < NUM_THREADS; i++)
      pi += sum[i] * step;
   printf("Pi = %lf\n", pi);
   return 0;
}
|
GB_unop__lnot_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_int32_int32
// op(A') function: GB_unop_tran__lnot_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = !(x != 0) elementwise: Cx [p] = lnot (Ax [p]) for all p.
// Ab (if non-NULL) is the bitmap marking which entries of Ax are present.
GrB_Info GB_unop_apply__lnot_int32_int32
(
    int32_t *Cx,               // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (A'): transpose A, typecast, and apply the unary operator.
// The actual loop lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB_unop_tran__lnot_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // per-workspace scratch arrays
    const int64_t *GB_RESTRICT A_slice, // how A is partitioned over tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kernel_cpu.c | #ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
//#include <omp.h> // (in path known to compiler) needed by openmp
#include <stdlib.h> // (in path known to compiler) needed by malloc
#include <stdio.h> // (in path known to compiler) needed by printf
#include <math.h> // (in path known to compiler) needed by exp
//======================================================================================================================================================150
// MAIN FUNCTION HEADER
//======================================================================================================================================================150
#include "./../main.h" // (in the main program folder) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL_CPU FUNCTION HEADER
//======================================================================================================================================================150
#include "kernel_cpu.h" // (in the current directory)
//========================================================================================================================================================================================================200
// PLASMAKERNEL_GPU
//========================================================================================================================================================================================================200
/*
 * CPU reference kernel (Rodinia "lavaMD"-style): for every box, accumulate
 * the pairwise interactions between its particles and the particles of the
 * box itself plus all of its neighbor boxes.
 *
 *   par  simulation parameters (uses par.alpha)
 *   dim  problem dimensions (uses dim.number_boxes)
 *   box  box descriptors: particle offset and neighbor list per box
 *   rv   particle positions, with the .v component used in the r2 term
 *   qv   particle charges
 *   fv   output: accumulated potential (.v) and force (.x/.y/.z)
 */
void kernel_cpu( par_str par,
		 dim_str dim,
		 box_str* box,
		 FOUR_VECTOR* rv,
		 fp* qv,
		 FOUR_VECTOR* fv)
{

	// timers bracketing the stages reported at the end
	long long time0 = get_time();
	long long time1;
	long long time2;
	long long time3;
	long long time4;

	// parameters
	fp alpha;
	fp a2;

	// counters
	int i, j, k, l;

	// home box
	long first_i;
	FOUR_VECTOR* rA;
	FOUR_VECTOR* fA;

	// neighbor box
	int pointer;
	long first_j;
	FOUR_VECTOR* rB;
	fp* qB;

	// per-pair intermediates
	fp r2;
	fp u2;
	fp fs;
	fp vij;
	fp fxij,fyij,fzij;
	THREE_VECTOR d;

	time1 = get_time();

	// MCPU SETUP (OpenMP threading disabled in this build)
	//omp_set_num_threads(dim.cores_arg);

	time2 = get_time();

	// INPUTS
	alpha = par.alpha;
	a2 = 2.0*alpha*alpha;

	time3 = get_time();

	// PROCESS INTERACTIONS
	printf("the value of NUMBER_PAR_PER_BOX is: %d \n", NUMBER_PAR_PER_BOX);

	for(l=0; l<dim.number_boxes; l=l+1){

		// home box: offset into the common particle arrays
		first_i = box[l].offset;
		rA = &rv[first_i];
		fA = &fv[first_i];

		// the home box itself (k==0) plus each of its neighbors
		for (k=0; k<(1+box[l].nn); k++)
		{
			if(k==0){
				pointer = l;                        // home box first
			}
			else{
				pointer = box[l].nei[k-1].number;   // then neighbor boxes
			}

			// neighbor box: positions and charges
			first_j = box[pointer].offset;
			rB = &rv[first_j];
			qB = &qv[first_j];

			// all pairs (home particle i) x (current-box particle j)
			for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){
				for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){

					// interaction coefficients
					r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]);
					u2 = a2*r2;
					vij= exp(-u2);
					fs = 2.*vij;

					d.x = rA[i].x - rB[j].x;
					d.y = rA[i].y - rB[j].y;
					d.z = rA[i].z - rB[j].z;
					fxij=fs*d.x;
					fyij=fs*d.y;
					fzij=fs*d.z;

					// accumulate charge-weighted potential and forces
					fA[i].v += qB[j]*vij;
					fA[i].x += qB[j]*fxij;
					fA[i].y += qB[j]*fyij;
					fA[i].z += qB[j]*fzij;

				} // for j
			} // for i
		} // for k
	} // for l

	time4 = get_time();

	// DISPLAY TIMING
	// BUGFIX: the bare "%" previously in these format strings ("% :")
	// formed an invalid conversion specification, which is undefined
	// behavior in printf; a literal percent sign must be written "%%".
	printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
	printf("%15.12f s, %15.12f %% : CPU/MCPU: VARIABLES\n",	(float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time4-time0) * 100);
	printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n",	(float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time4-time0) * 100);
	printf("%15.12f s, %15.12f %% : CPU/MCPU: INPUTS\n",	(float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time4-time0) * 100);
	printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n",	(float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time4-time0) * 100);
	printf("Total time:\n");
	printf("%.12f s\n", 								(float) (time4-time0) / 1000000);

} // main
#ifdef __cplusplus
}
#endif
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
  void MergeFrom(const Boosting* other) override {
    // Prepend all trees of `other` to this model. After the merge,
    // num_init_iteration_ counts only the iterations that came from
    // `other`, while num_iteration_for_pred_ covers the combined model.
    // NOTE(review): reinterpret_cast assumes `other` really is a GBDT.
    auto other_gbdt = reinterpret_cast<const GBDT*>(other);
    // tmp move to other vector
    auto original_models = std::move(models_);
    models_ = std::vector<std::unique_ptr<Tree>>();
    // push model from other first (deep copies of the trees)
    for (const auto& tree : other_gbdt->models_) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    // push model in current object
    for (const auto& tree : original_models) {
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
      models_.push_back(std::move(new_tree));
    }
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  }
  void ShuffleModels(int start_iter, int end_iter) override {
    // Randomly permute the iterations in [start_iter, end_iter) with a
    // fixed-seed Fisher-Yates shuffle, keeping the trees of one iteration
    // together. end_iter <= 0 means "up to the last iteration".
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    // identity permutation over all iterations; only the requested range
    // is shuffled below
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    // fixed seed so the shuffle is reproducible across runs
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    // rebuild models_ in the permuted iteration order (deep copies)
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
  int64_t GetNumPredictAt(int data_idx) const override {
    // Size of the prediction array for dataset data_idx:
    //   data_idx == 0 -> training data, data_idx >= 1 -> validation set
    //   (data_idx - 1). Result is num_data * num_class_.
    CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
    data_size_t num_data = train_data_->num_data();
    if (data_idx > 0) {
      num_data = valid_score_updater_[data_idx - 1]->num_data();
    }
    return static_cast<int64_t>(num_data) * num_class_;
  }
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param start_iteration Start index of the iteration to predict
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
  inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
    // Number of output values produced for a single data row:
    //   normal prediction -> num_class_
    //   leaf prediction   -> num_class_ * number of predicted iterations
    //   contributions     -> one value per feature plus the baseline,
    //                        per tree of one iteration
    int num_pred_in_one_row = num_class_;
    if (is_pred_leaf) {
      int max_iteration = GetCurrentIteration();
      // clamp start_iteration into [0, max_iteration]
      start_iteration = std::max(start_iteration, 0);
      start_iteration = std::min(start_iteration, max_iteration);
      if (num_iteration > 0) {
        num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
      } else {
        // num_iteration <= 0 means "all remaining iterations"
        num_pred_in_one_row *= (max_iteration - start_iteration);
      }
    } else if (is_pred_contrib) {
      num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2);  // +1 for 0-based indexing, +1 for baseline
    }
    return num_pred_in_one_row;
  }
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
  /*!
  * \brief Get max feature index of this model
  * \return Max feature index seen in the training data
  */
  inline int MaxFeatureIdx() const override { return max_feature_idx_; }
  /*!
  * \brief Get feature names of this model
  * \return Copy of the feature name list of this model
  */
  inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
  /*!
  * \brief Get index of label column
  * \return Index of the label column in the input data
  */
  inline int LabelIdx() const override { return label_idx_; }
  /*!
  * \brief Get number of weak sub-models
  * \return Total number of trees (= iterations * trees per iteration)
  */
  inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
  /*!
  * \brief Get number of trees trained per boosting iteration
  * \return number of tree per iteration
  */
  inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
  /*!
  * \brief Get number of classes
  * \return Number of classes of the training labels
  */
  inline int NumberOfClasses() const override { return num_class_; }
  /*!
  * \brief Prepare iteration range (and trees) for subsequent predictions
  * \param start_iteration First iteration to use; clamped to [0, current iteration]
  * \param num_iteration Number of iterations to use; <= 0 means all remaining
  * \param is_pred_contrib True when feature contributions will be predicted
  */
  inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iteration = std::max(start_iteration, 0);
    start_iteration = std::min(start_iteration, num_iteration_for_pred_);
    if (num_iteration > 0) {
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
    } else {
      // Non-positive num_iteration: use every iteration from start_iteration on.
      num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
    }
    start_iteration_for_pred_ = start_iteration;
    if (is_pred_contrib) {
      // Contribution prediction reads each tree's max depth; refresh it first.
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }
  // Return the output value of leaf `leaf_idx` of tree `tree_idx`.
  // Both indices are bounds-checked against the stored models.
  inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    return models_[tree_idx]->LeafOutput(leaf_idx);
  }
  // Overwrite the output value of leaf `leaf_idx` of tree `tree_idx`.
  // Both indices are bounds-checked against the stored models.
  inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
    CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
    CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
    models_[tree_idx]->SetLeafOutput(leaf_idx, val);
  }
  /*!
  * \brief Get type name of the weak sub-models of this boosting object
  */
  const char* SubModelName() const override { return "tree"; }
  // Whether the trees use linear models in their leaves (linear-tree mode).
  bool IsLinear() const override { return linear_tree_; }
  // Raw parser configuration file content, kept for serialization.
  inline std::string ParserConfigStr() const override {return parser_config_str_;}
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief Parser config file content */
std::string parser_config_str_ = "";
#if defined(USE_CUDA) || defined(USE_CUDA_EXP)
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
link-list-alg-openmp3x.c | /*********************************************************************************
OpenMP-3.0 Example Codes Beta-v1.0
File : link-list-alg-openmp3x.c
Date : March 2010
Description : The program perform the link list traversal(irregular parallelism)
in parallel using the openmp-3.0 feature task construct and
openMP-2.5 approach and measure the time taken in both the
approaches.
a) incrementList_Wtake (OpenMP-2.5) : When a thread encounter the
parallel construct it creates the team of threads. The single
construct inside a parallel region restrict that only one thread
at a time can process the node. It is unintuitive and inefficient
because only one thread at a time is involved in processing, which
incurs the relatively high cost of the single construct.
b) incrementList_Task(OpenMP-3.0) : This approach uses
the openMP-3.0 task construct. Whenever a thread encounters
a task construct, a new explicit task, An explicit task may be
executed by any thread in the current team, in parallel with
other tasks.In this approach the several task can be executed in
parallel.
OpenMP pragma/
Directive used : #pragma omp parallel
#pragma omp single
#pragma omp task
Input : - Number of Nodes in the link list
- Number of threads to be used
Output : Time taken in both approach.
*********************************************************************************/
/* Header file inclusion */
#include<stdio.h>
#include<stdlib.h>
#include <omp.h>
/* Global variable declaration */
long int totalNodes;
int numThreads;
typedef struct node node;
/* Structure for link list node */
struct node
{
int data;
struct node *link;
};
/* Function declaration to perform operation on Link List */
node *createList(node ** );
void incrementList_Task(node *);
void incrementListItem_Wtask(node *);
void processList(node *);
//void traverseList(node *);
/* Main Function */
int main(int argc,char** argv)
{
node *start;
start = NULL;
/* Checking for command line arguments */
if( argc != 3 ){
printf("\t\t Very Few Arguments\n ");
printf("\t\t Syntax : exec <total-nodes> <No. of Threads>\n");
exit(-1);
}
/* Initalizing Number of Nodes in the List and
Number of threads */
totalNodes =atol(argv[1]);
numThreads =atoi(argv[2]);
if(totalNodes<=0)
{
printf("\n\t Error : Number of nodes should be greater then 0\n");
exit(-1);
}
/* Function Calling to create the link list */
start=createList(&start);
printf("\n\t\t Total Nodes in the List : %ld ",totalNodes);
printf("\n\t\t Number of threads : %d ",numThreads);
/* Check for the Empty link list condition */
if ( start != NULL){
//traverseList(start);
/* Function Calling to process the list using Task Construct */
incrementList_Task(start);
/* Function Calling to process the list using Parallel Construct */
incrementListItem_Wtask(start);
//traverseList(start);
}
else
printf("\n List is Empty \n");
return 0;
} /* End of main */
/* Description : Create a singly linked list of `totalNodes` nodes with
 *               random payloads, appending to any list already rooted
 *               at *q.
 * @param [**q] : Address of the start pointer of the link list
 * @return      : Start pointer of the list (NULL when totalNodes == 0
 *                and the list was empty — the original code returned an
 *                uninitialized pointer in that case)
 *
 * Fix: the original re-walked the entire list to find the tail before
 * every insertion, making construction O(n^2); a tail pointer makes it
 * O(n). */
struct node *createList(node **q)
{
    node *head = *q;
    node *tail;
    node *fresh;
    long int count;

    /* Find the current tail once (NULL when the list is empty). */
    for (tail = head; tail != NULL && tail->link != NULL; tail = tail->link)
        ;
    for (count = 1; count <= totalNodes; count++) {
        /* Create the node */
        if ((fresh = malloc(sizeof *fresh)) == NULL) {
            perror("\n\t Memory allocation for newnode ");
            printf(" \n\t Creating the singly Link List.................Failed. \n");
            exit(-1);
        }
        fresh->data = rand();
        fresh->link = NULL;
        if (tail == NULL) {
            /* First node of the list: it is both head and tail. */
            *q = fresh;
            head = fresh;
        } else {
            tail->link = fresh;
        }
        tail = fresh;
    }
    return head;
} /* End of Create List function */
/* Description : Function to increment the List items using TASK CONSTRUCT (openmp-3.0)
Whenever a thread encounters a task construct, a new explicit task,
An explicit task may be executed by any thread in the current team,
in parallel with other tasks.In this approach the several task can
be executed in parallel.
@param [node] : start pointer to link list
@return : None
*/
void incrementList_Task(node *head)
{
    double start_time,end_time;
    /* Set the number of threads in the parallel region */
    omp_set_num_threads(numThreads);
    /* Get the start time */
    start_time=omp_get_wtime();
    /* Create the parallel region; all threads wait at the single region
       and participate in executing the tasks it spawns. */
    #pragma omp parallel
    {
        #pragma omp single /* Only one thread walks the list and creates tasks */
        {
            node * q = head;
            while (q) {
                /* One task per node; any thread in the team may run it.
                   q is firstprivate in the task, so each task captures
                   the node it was created for. */
                #pragma omp task /* Create the task */
                processList(q);
                q = q->link;
            }
        }
    } /* end of the parallel region (implicit barrier: all tasks finish) */
    end_time=omp_get_wtime();
    printf("\n\t\t Time taken ( Task Construct : openMP-3.0) : %lf sec", (end_time -start_time));
} /* End of the function */
/* Description : Function to increment the List items WITHOUT using TASK CONSTRUCT (openmp-2.5)
When a thread encounter the parallel construct it creates the team of threads.
The single construct inside a parallel region restrict that only one thread
at a time can process the node. It is unintuitive and inefficient because only
one thread at a time is involved in processing, which incurs the relatively high cost of
the single construct.
@param [head]: start pointer to link list
@return : None
*/
/* Time the OpenMP-2.5 parallel/single traversal of the list.
 * Every thread walks the list in lockstep; the single construct lets only
 * one thread process each node.
 * Fix: removed the unused VLA `int list_Node[totalNodes]` (a large node
 * count could overflow the stack) and the unused variables
 * `total_elements` and `i`. */
void incrementListItem_Wtask(node *head)
{
    double start_time, end_time;
    /* Set the number of threads */
    omp_set_num_threads(numThreads);
    /* get the start time */
    start_time = omp_get_wtime();
    /* Create the team of threads */
    #pragma omp parallel
    {
        node * q = head;
        while (q) {
            #pragma omp single /* Restrict single thread process the list */
            processList(q);
            q = q->link;
        }
    }
    end_time = omp_get_wtime();
    printf("\n\t\t Time taken ( Parallel / Single Directive : OpenMP-2.5) : %lf sec\n", (end_time -start_time));
}/* End of the function */
/*
   Description : Double the payload stored in one node.
   @Param[q]   : pointer to the current node
   @return     : None
*/
void processList(struct node *q)
{
    /* Doubling via self-addition, exactly as the original did. */
    q->data += q->data;
}
/* Function to travese the list */
/*void traverseList(node *q)
{
if(q==NULL)
{
printf("\n\nEmpty Link List.Can't Display The Data");
exit(-1);
}
printf("\t");
while(q!=NULL)
{
printf(" %d--->",q->data);
q=q->link;
}
}*/
|
grid_ao.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#include "gto/grid_ao_drv.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
#define ALL_IMAGES 255
#define IMGBLK 40
#define OF_CMPLX 2
double CINTcommon_fac_sp(int l);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv1(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv2(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv3(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart_deriv4(double *cgto, double *ri, double *exps,
double *coord, double *alpha, double *coeff, double *env,
int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff,
double *env, int l, int np, int nc,
size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_ip_cart(double *gto, double *ri, double *exps,
double *coord, double *alpha, double *coeff,
double *env, int l, int np, int nc,
size_t nao, size_t ngrids, size_t bgrids);
/*
* Extend the meaning of non0table: given shell ID and block ID,
* non0table is the number of images in Ls that does not vanish.
* Ls should be sorted based on the distance to center cell.
*/
void PBCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
                     double *Ls, int nimgs,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        /* Number of grid blocks of size BLKSIZE (last one may be partial). */
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel
{
        int i, j, m;
        int np, nc, atm_id;
        size_t bas_id, ib;
        double rr, arr, maxc;
        double logcoeff[NPRIMAX];
        double dr[3];
        double rL[3];
        double *p_exp, *pcoeff, *ratm;
#pragma omp for nowait schedule(dynamic)
        for (bas_id = 0; bas_id < nbas; bas_id++) {
                np = bas[NPRIM_OF+bas_id*BAS_SLOTS];
                nc = bas[NCTR_OF +bas_id*BAS_SLOTS];
                p_exp = env + bas[PTR_EXP+bas_id*BAS_SLOTS];
                pcoeff = env + bas[PTR_COEFF+bas_id*BAS_SLOTS];
                atm_id = bas[ATOM_OF+bas_id*BAS_SLOTS];
                ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];
                /* log of the largest contraction coefficient for each
                 * primitive, used to loosen the exponential cutoff. */
                for (j = 0; j < np; j++) {
                        maxc = 0;
                        for (i = 0; i < nc; i++) {
                                maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
                        }
                        logcoeff[j] = log(maxc);
                }
                for (ib = 0; ib < nblk; ib++) {
                        /* Scan images from farthest to nearest; the first
                         * non-vanishing image m fixes the image count m+1
                         * (Ls is assumed sorted by distance to the center
                         * cell — see the comment above this function). */
                        for (m = nimgs-1; m >= 0; m--) {
                                rL[0] = ratm[0] + Ls[m*3+0];
                                rL[1] = ratm[1] + Ls[m*3+1];
                                rL[2] = ratm[2] + Ls[m*3+2];
                                for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
                                        dr[0] = coords[0*ngrids+i] - rL[0];
                                        dr[1] = coords[1*ngrids+i] - rL[1];
                                        dr[2] = coords[2*ngrids+i] - rL[2];
                                        rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
                                        for (j = 0; j < np; j++) {
                                                arr = p_exp[j] * rr;
                                                if (arr-logcoeff[j] < EXPCUTOFF) {
                                                        /* Shell survives screening in this block:
                                                         * record how many images contribute,
                                                         * capped at ALL_IMAGES. */
                                                        non0table[ib*nbas+bas_id] = MIN(ALL_IMAGES, m+1);
                                                        goto next_blk;
                                                }
                                        }
                                }
                        }
                        /* No image contributes for this (block, shell) pair. */
                        non0table[ib*nbas+bas_id] = 0;
next_blk:;
                }
        }
}
}
/* Unpack the interleaved real/imaginary AO blocks produced by dgemm into a
 * complex output array.
 * ao_k layout : [nkpts][2][ncomp*ncol*bgrids] — a real block followed by an
 *               imaginary block for each k-point.
 * out layout  : [nkpts*ncomp][nao*ngrids]; only the first bgrids entries of
 *               each ngrids-long column are written. */
static void _copy(double complex *out, double *ao_k,
                  size_t ngrids, size_t bgrids,
                  int nkpts, int ncomp, int nao, int ncol)
{
        int kp, comp, col, g;
        int blksize = ncomp * ncol * bgrids;
        for (kp = 0; kp < nkpts; kp++) {
                double *re = ao_k + kp*2 * blksize;
                double *im = re + blksize;
                for (comp = 0; comp < ncomp; comp++) {
                        double complex *dst = out + (kp * ncomp + comp) * nao * ngrids;
                        for (col = 0; col < ncol; col++) {
                                for (g = 0; g < bgrids; g++) {
                                        dst[col*ngrids+g] = re[col*bgrids+g]
                                                          + im[col*bgrids+g]*_Complex_I;
                                }
                        }
                        re += ncol * bgrids;
                        im += ncol * bgrids;
                }
        }
}
// grid2atm[nimgs,xyz,grid_id]
/* Fill grid-to-atom displacement vectors for every lattice image of one atom
 * and record in min_grid2atm[m] the minimum grid-atom distance of image m.
 * Images with m >= atm_imag_max (unless atm_imag_max == ALL_IMAGES) are
 * skipped: their grid2atm slot is left untouched and min_grid2atm[m] stays
 * unset.  NOTE(review): the callers guard every read of min_grid2atm[iL]
 * with `iL < non0table[bas_id]`, which keeps skipped entries unread —
 * presumably intentional; verify if call sites change. */
static void _fill_grid2atm(double *grid2atm, double *min_grid2atm,
                           double *coord, double *Ls, double *r_atm,
                           int atm_imag_max, size_t bgrids, size_t ngrids, int nimgs)
{
        int ig, m;
        double rL[3];
        double dist;
        double dist_min;
        for (m = 0; m < nimgs; m++) {
                if ((m < atm_imag_max || atm_imag_max == ALL_IMAGES)) {
                        /* Atom position shifted by lattice vector of image m. */
                        rL[0] = r_atm[0] + Ls[m*3+0];
                        rL[1] = r_atm[1] + Ls[m*3+1];
                        rL[2] = r_atm[2] + Ls[m*3+2];
                        dist_min = 1e9;
                        for (ig = 0; ig < bgrids; ig++) {
                                grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - rL[0];
                                grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - rL[1];
                                grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - rL[2];
                                dist = (grid2atm[0*BLKSIZE+ig]*grid2atm[0*BLKSIZE+ig] +
                                        grid2atm[1*BLKSIZE+ig]*grid2atm[1*BLKSIZE+ig] +
                                        grid2atm[2*BLKSIZE+ig]*grid2atm[2*BLKSIZE+ig]);
                                dist_min = MIN(dist, dist_min);
                        }
                        min_grid2atm[m] = sqrt(dist_min);
                }
                /* Advance to the next image slot even when skipped, so the
                 * [nimgs,3,BLKSIZE] layout stays aligned with image index. */
                grid2atm += 3*BLKSIZE;
        }
}
/* Evaluate periodic Cartesian GTO values for the shells in shls_slice on one
 * block of bgrids grid points, accumulating the contributions of all lattice
 * images with the k-point phase factors expLk into the complex AO array
 * `ao` (at column offset offao).  `buf` is caller-provided scratch; its
 * layout is carved out below (see PBCeval_loop for the total size). */
void PBCeval_cart_iter(FPtr_eval feval, FPtr_exp fexp,
                       size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                       int param[], int *shls_slice, int *ao_loc, double *buf,
                       double *Ls, double complex *expLk,
                       int nimgs, int nkpts, int di_max, double complex *ao,
                       double *coord, double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        /* Complex k-point sums are done as real dgemm on interleaved data. */
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pao, *ri;
        /* Carve the scratch buffer into named regions. */
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *min_grid2atm = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        /* For each atom, the largest image count any of its shells needs. */
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }
        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l = bas[bas_id*BAS_SLOTS+ANG_OF ];
                deg = (l+1)*(l+2)/2;
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                /* Shells are grouped by atom; recompute grid displacements
                 * only when the atom changes. */
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id], bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }
                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }
                /* Process images in batches of IMGBLK, accumulating the
                 * k-point-phased sum into aobufk via dgemm. */
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                /* Keep an image only if screening says it
                                 * contributes, it lies within rcut, and the
                                 * exponential part is non-negligible. */
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*deg, bgrids, bgrids);
                                        img_idx[count] = iL;
                                        count += 1;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped
                                        /* Gather the phase factors of the kept
                                         * images into a contiguous buffer. */
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2, &D1, aobufk, &dimc);
                        }
                }
                /* Convert the interleaved real/imag result to complex AOs. */
                _copy(ao+ao_id*ngrids+offao, aobufk,
                      ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}
/* Spherical-harmonics variant of PBCeval_cart_iter: shells are evaluated in
 * Cartesian form and transformed to real spherical AOs (CINTc2s_ket_sph1)
 * before the k-point-phased image sum.  Same scratch-buffer scheme, with one
 * extra region (cart_gto) for the untransformed Cartesian values. */
void PBCeval_sph_iter(FPtr_eval feval, FPtr_exp fexp,
                      size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                      int param[], int *shls_slice, int *ao_loc, double *buf,
                      double *Ls, double complex *expLk,
                      int nimgs, int nkpts, int di_max, double complex *ao,
                      double *coord, double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        /* Complex k-point sums are done as real dgemm on interleaved data. */
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pcart, *pao, *ri;
        /* Carve the scratch buffer into named regions. */
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *cart_gto = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *min_grid2atm = cart_gto + ncomp*NCTR_CART*bgrids;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        /* For each atom, the largest image count any of its shells needs. */
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }
        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l = bas[bas_id*BAS_SLOTS+ANG_OF ];
                deg = l * 2 + 1;
                dcart = (l+1)*(l+2)/2;
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                /* Shells are grouped by atom; recompute grid displacements
                 * only when the atom changes. */
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id], bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }
                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }
                /* Process images in batches of IMGBLK, accumulating the
                 * k-point-phased sum into aobufk via dgemm. */
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                /* Keep an image only if screening says it
                                 * contributes, it lies within rcut, and the
                                 * exponential part is non-negligible. */
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        if (l <= 1) { // s, p functions
                                                /* Cartesian and spherical
                                                 * forms coincide for l<=1. */
                                                (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                        } else {
                                                (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                                /* Cartesian -> real spherical
                                                 * transform, contraction by
                                                 * contraction. */
                                                pcart = cart_gto;
                                                for (i = 0; i < ncomp * nc; i++) {
                                                        CINTc2s_ket_sph1(pao, pcart, bgrids, bgrids, l);
                                                        pao += deg * bgrids;
                                                        pcart += dcart * bgrids;
                                                }
                                        }
                                        img_idx[count] = iL;
                                        count++;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped
                                        /* Gather the phase factors of the kept
                                         * images into a contiguous buffer. */
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2, &D1, aobufk, &dimc);
                        }
                }
                /* Convert the interleaved real/imag result to complex AOs. */
                _copy(ao+ao_id*ngrids+offao, aobufk,
                      ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}
int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas);
/*
* blksize <= 1024 to avoid stack overflow
*
* non0table[ngrids/blksize,natm] is the T/F table for ao values to
* screen the ao evaluation for each shell
*/
/* Generic parallel driver: partitions the work into (grid block, shell
 * block) pairs and dispatches each pair to `fiter` (the Cartesian or
 * spherical iterator).  Each thread owns one scratch buffer sized for the
 * widest shell.
 * Fix: the result of malloc was previously used unchecked; a failed
 * allocation would have been dereferenced inside the iterator. */
void PBCeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp,
                  int ngrids, int param[], int *shls_slice, int *ao_loc,
                  double *Ls, int nimgs, double complex *expLk, int nkpts,
                  double complex *ao, double *coord,
                  double *rcut, unsigned char *non0table,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;
        int i;
        int di_max = 0;
        /* Widest shell (in AO count) fixes the per-thread buffer size. */
        for (i = shls_slice[0]; i < shls_slice[1]; i++) {
                di_max = MAX(di_max, ao_loc[i+1] - ao_loc[i]);
        }
#pragma omp parallel
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const size_t nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff, bgrids;
        /* Scratch layout consumed by PBCeval_{cart,sph}_iter:
         * grid2atm + eprim + aobuf + aobufk + Lk_buf(+cart_gto) + min_grid2atm */
        size_t bufsize =((nimgs*3 + NPRIMAX*2 +
                          nkpts *param[POS_E1]*param[TENSOR]*di_max * OF_CMPLX +
                          IMGBLK*param[POS_E1]*param[TENSOR]*di_max +
                          param[POS_E1]*param[TENSOR]*NCTR_CART) * BLKSIZE
                         + nkpts * IMGBLK * OF_CMPLX + nimgs);
        double *buf = malloc(sizeof(double) * bufsize);
        if (buf == NULL) {
                /* Out of memory: fail fast instead of dereferencing NULL
                 * inside the iterator. */
                abort();
        }
#pragma omp for nowait schedule(dynamic, 1)
        for (k = 0; k < nblk*nshblk; k++) {
                /* Decode the flat work index into shell block and grid block. */
                iloc = k / nblk;
                ish = shloc[iloc];
                ib = k - iloc * nblk;
                ip = ib * BLKSIZE;
                aoff = (ao_loc[ish] - ao_loc[sh0]) * Ngrids + ip;
                bgrids = MIN(ngrids-ip, BLKSIZE);
                (*fiter)(feval, fexp, nao, Ngrids, bgrids, aoff,
                         param, shloc+iloc, ao_loc, buf,
                         Ls, expLk, nimgs, nkpts, di_max,
                         ao, coord+ip, rcut, non0table+ib*nbas,
                         atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
/* Driver: evaluate periodic Cartesian AOs via the generic block loop. */
void PBCeval_cart_drv(FPtr_eval feval, FPtr_exp fexp,
                      int ngrids, int param[], int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_cart_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Driver: evaluate periodic spherical AOs via the generic block loop. */
void PBCeval_sph_drv(FPtr_eval feval, FPtr_exp fexp,
                     int ngrids, int param[], int *shls_slice, int *ao_loc,
                     double *Ls, int nimgs, double complex *expLk, int nkpts,
                     double complex *ao, double *coord,
                     double *rcut, unsigned char *non0table,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_sph_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO values on a real-space grid, no derivatives.
 * param = {1, 1}: one value component per AO (POS_E1 = 1, TENSOR = 1). */
void PBCGTOval_cart_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 1};
    PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Spherical-harmonic variant of PBCGTOval_cart_deriv0. The shell evaluator
 * works in Cartesian components; presumably the cart->sph transformation is
 * applied inside PBCeval_sph_drv's iterator -- TODO confirm against
 * PBCeval_sph_iter. */
void PBCGTOval_sph_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 1};
    PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                    ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                    ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO values and first derivatives on a grid.
 * param = {1, 4}: 4 tensor components per AO -- by the deriv1 naming,
 * presumably value + d/dx, d/dy, d/dz. */
void PBCGTOval_cart_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 4};
    PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Spherical-harmonic variant of PBCGTOval_cart_deriv1. */
void PBCGTOval_sph_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 4};
    PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                    ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                    ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO values up to second derivatives on a grid.
 * param = {1, 10}: 10 components -- consistent with the cumulative count
 * 1 (value) + 3 (first derivs) + 6 (second derivs); TODO confirm layout
 * against GTOshell_eval_grid_cart_deriv2. */
void PBCGTOval_cart_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 10};
    PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Spherical-harmonic variant of PBCGTOval_cart_deriv2. */
void PBCGTOval_sph_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 10};
    PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                    ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                    ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO values up to third derivatives on a grid.
 * param = {1, 20}: cumulative component count 1 + 3 + 6 + 10 = 20. */
void PBCGTOval_cart_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 20};
    PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Spherical-harmonic variant of PBCGTOval_cart_deriv3. */
void PBCGTOval_sph_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 20};
    PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                    ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                    ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO values up to fourth derivatives on a grid.
 * param = {1, 35}: cumulative component count 1 + 3 + 6 + 10 + 15 = 35. */
void PBCGTOval_cart_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord,
                           double *rcut, unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 35};
    PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Spherical-harmonic variant of PBCGTOval_cart_deriv4. */
void PBCGTOval_sph_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord,
                          double *rcut, unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 35};
    PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                    ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                    ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO values, no derivatives: plain alias for
 * PBCGTOval_cart_deriv0 (kept for API compatibility).
 * The commented-out duplicate implementation was removed -- it was
 * byte-for-byte what deriv0 already does. */
void PBCGTOval_cart(int ngrids, int *shls_slice, int *ao_loc,
                    double *Ls, int nimgs, double complex *expLk, int nkpts,
                    double complex *ao, double *coord,
                    double *rcut, unsigned char *non0table,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
    PBCGTOval_cart_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                          ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic spherical AO values, no derivatives: alias for
 * PBCGTOval_sph_deriv0. */
void PBCGTOval_sph(int ngrids, int *shls_slice, int *ao_loc,
                   double *Ls, int nimgs, double complex *expLk, int nkpts,
                   double complex *ao, double *coord,
                   double *rcut, unsigned char *non0table,
                   int *atm, int natm, int *bas, int nbas, double *env)
{
    PBCGTOval_sph_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Periodic Cartesian AO gradient (nabla) on a grid.
 * param = {1, 3}: three components per AO -- presumably d/dx, d/dy, d/dz
 * only (no value), matching the "ip" naming; TODO confirm against
 * GTOshell_eval_grid_ip_cart. */
void PBCGTOval_ip_cart(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex *ao, double *coord,
                       double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 3};
    PBCeval_cart_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
/* Spherical-harmonic variant of PBCGTOval_ip_cart. */
void PBCGTOval_ip_sph(int ngrids, int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    int param[] = {1, 3};
    PBCeval_sph_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                    ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                    ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
|
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Generic packn-packed convolution for RISC-V Vector, fp16 storage / fp32
// arithmetic: inputs and weights are loaded as __fp16 but accumulated into a
// widened fp32 vector (vfwmacc) for accuracy, then narrowed back to fp16 on
// store. Bias is kept in fp32. One output channel per OpenMP task.
static void convolution_packn_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of 16-bit lanes per vector register (VLEN bytes / 2).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int maxk = kernel_w * kernel_h;
    // kernel offsets: flattened per-tap pixel offsets within one input row
    // window, accounting for dilation in both axes.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // jump from the end of one kernel row to the start of the next
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }
    const float* bias_data_ptr = bias_data;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp32 accumulator (m2 register group holds packn widened lanes)
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);
                if (bias_data_ptr)
                {
                    // one packn-wide fp32 bias vector per output channel
                    _sum = vle32_v_f32m2(bias_data_ptr + p * packn, vl);
                }
                const __fp16* kptr = weight_data_fp16.channel(p);
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;
                    for (int k = 0; k < maxk; k++)
                    {
                        const __fp16* slptr = sptr + space_ofs[k] * packn;
                        // per tap: packn input lanes x packn output lanes of
                        // weights; kptr advances one weight vector per input lane
                        for (int l = 0; l < packn; l++)
                        {
                            float val = (float)*slptr++;
                            vfloat16m1_t _w0 = vle16_v_f16m1(kptr, vl);
                            // widening multiply-accumulate: fp32 += fp32(val) * fp16 weights
                            _sum = vfwmacc_vf_f32m2(_sum, val, _w0, vl);
                            kptr += packn;
                        }
                    }
                }
                _sum = activation_ps(_sum, activation_type, activation_params, vl);
                // narrow fp32 accumulator back to fp16 and store packn outputs
                vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_sum, vl), vl);
            }
            outptr += outw * packn;
        }
    }
}
// Generic packn-packed convolution for RISC-V Vector, fp16 storage AND fp16
// arithmetic ("fp16sa"): accumulation stays in fp16 (vfmacc on f16m1), which
// is faster but less accurate than the widening fp16s variant above. Bias is
// provided pre-converted to fp16. One output channel per OpenMP task.
static void convolution_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of 16-bit lanes per vector register (VLEN bytes / 2).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int maxk = kernel_w * kernel_h;
    // kernel offsets: flattened per-tap pixel offsets within one input row
    // window, accounting for dilation in both axes.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // jump from the end of one kernel row to the start of the next
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }
    const __fp16* bias_data_ptr = bias_data_fp16;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp16 accumulator -- no widening in this variant
                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
                if (bias_data_ptr)
                {
                    // one packn-wide fp16 bias vector per output channel
                    _sum = vle16_v_f16m1(bias_data_ptr + p * packn, vl);
                }
                const __fp16* kptr = weight_data_fp16.channel(p);
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;
                    for (int k = 0; k < maxk; k++)
                    {
                        const __fp16* slptr = sptr + space_ofs[k] * packn;
                        // per tap: packn input lanes x packn output lanes of
                        // weights; kptr advances one weight vector per input lane
                        for (int l = 0; l < packn; l++)
                        {
                            __fp16 val = *slptr++;
                            vfloat16m1_t _w0 = vle16_v_f16m1(kptr, vl);
                            _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);
                            kptr += packn;
                        }
                    }
                }
                _sum = activation_ps(_sum, activation_type, activation_params, vl);
                vse16_v_f16m1(outptr + j * packn, _sum, vl);
            }
            outptr += outw * packn;
        }
    }
}
|
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "globals.h"
#include "terrain.h"
#include "../lib/FastNoiseLite.h"
#include "xorshift.h"
#include "utils.h"
static TerrainTreeNode terrain_tree_root = {0};
/* Allocate and initialize one child quadtree node under `parent`.
 * on_left/on_top record which quadrant the child occupies relative to its
 * parent; these bits are later used to reconstruct the node's x/y grid
 * coordinates. Aborts on OOM (the original unchecked malloc would have
 * dereferenced NULL). */
static TerrainTreeNode *terrain_tree_make_child(TerrainTreeNode *parent,
                                                unsigned on_left, unsigned on_top) {
    TerrainTreeNode *child = malloc(sizeof *child);
    if (!child) {
        fprintf(stderr, "terrain: out of memory allocating quadtree node\n");
        abort();
    }
    *child = (TerrainTreeNode) {
        .metadata = {
            .need_updating = true,
            .terminal_node = false,
            .parent_on_left = on_left,
            .parent_on_top = on_top,
        },
        .tp_offset = -1,
        .tp_x = -1,
        .tp_y = -1,
        .parent = parent,
        .childNW = NULL,
        .childNE = NULL,
        .childSW = NULL,
        .childSE = NULL,
    };
    return child;
}
/* Eagerly build a full quadtree of depth TERRAIN_SPAN below `node`.
 * Each node's (x, y) coordinates are reconstructed from the quadrant bits
 * recorded on the path back to the root: bit i of x/y comes from the
 * ancestor i levels up. Nodes at depth TERRAIN_SPAN become terminal
 * (leaf) nodes mapped to a single terrain pixel. */
void initialize_terrain_tree(TerrainTreeNode *node, int depth) {
    TerrainTreeNode *p = node;
    unsigned int x = 0;
    unsigned int y = 0;
    for (int i = 0; i < depth; ++i) {
        x |= p->metadata.parent_on_left << i;
        y |= p->metadata.parent_on_top << i;
        p = p->parent;
    }
    node->tp_offset = (int) (y * TERRAIN_SIZE + x);
    node->tp_x = (int) x;
    node->tp_y = (int) y;
    if (depth == TERRAIN_SPAN) {
        node->metadata.terminal_node = true;
    } else {
        /* The four quadrants differ only in their (on_left, on_top) bits;
         * the original repeated the whole initializer four times. */
        node->childNW = terrain_tree_make_child(node, 0, 0);
        node->childNE = terrain_tree_make_child(node, 1, 0);
        node->childSW = terrain_tree_make_child(node, 0, 1);
        node->childSE = terrain_tree_make_child(node, 1, 1);
        initialize_terrain_tree(node->childNW, depth + 1);
        initialize_terrain_tree(node->childNE, depth + 1);
        initialize_terrain_tree(node->childSW, depth + 1);
        initialize_terrain_tree(node->childSE, depth + 1);
    }
}
/* Recursively free the subtree rooted at `node`. The root of the whole tree
 * (the node with no parent) is a static object and is never freed itself;
 * only heap-allocated descendants are released. NULL is tolerated. */
void destroy_terrain_tree(TerrainTreeNode *node) {
    if (node == NULL) {
        return;
    }
    if (!node->metadata.terminal_node) {
        TerrainTreeNode *children[4] = {
            node->childNW, node->childNE, node->childSW, node->childSE,
        };
        for (int i = 0; i < 4; ++i) {
            destroy_terrain_tree(children[i]);
        }
    }
    /* Only heap nodes (those with a parent) were malloc'ed. */
    if (node->parent != NULL) {
        free(node);
    }
}
/* Find (or lazily create) the leaf quadtree node for terrain pixel (x, y).
 * Walks TERRAIN_SPAN levels from the root, choosing a quadrant per bit of
 * x/y (most significant first) and allocating missing interior nodes with
 * need_updating = false. Returns NULL for out-of-range coordinates.
 *
 * NOTE(review): the quadrant naming is mirrored relative to
 * initialize_terrain_tree -- here a 0-bit descends into childSE while
 * initialize_terrain_tree gives childNW the (0, 0) bits. The convention is
 * internally consistent with terrain_refresh_quadtree's detach logic, and
 * initialize_terrain_tree is currently unused (commented out in
 * terrain_generate), but the two should not be mixed -- confirm before
 * re-enabling the eager initializer. */
TerrainTreeNode *terrain_get_node_at(int x, int y) {
    if (x < 0 || y < 0 || x >= TERRAIN_SIZE || y >= TERRAIN_SIZE) {
        return NULL;
    }
    unsigned int tx = x;
    unsigned int ty = y;
    TerrainTreeNode *node = &terrain_tree_root;
    TerrainTreeNode *last = NULL;
    /* Descend from the most significant coordinate bit to the least. */
    for (int i = TERRAIN_SPAN - 1; i >= 0; --i) {
        bool left = (tx & 0x1u << i) != 0;
        bool top = (ty & (0x1u << i)) != 0;
        /* Debug trap: node should never be NULL mid-descent, since each
         * missing child is allocated before stepping into it. */
        if (!node) {
            printf("beans");
            fflush(stdout);
        }
        last = node;
        if (!top) {
            if (!left) {
                // printf("SE ");fflush(stdout);
                if (!node->childSE) {
                    /* Lazily create the child; need_updating stays false
                     * until a pixel update marks the path dirty. */
                    node->childSE = (TerrainTreeNode *) malloc(sizeof(TerrainTreeNode));
                    *node->childSE = (TerrainTreeNode) {
                        .metadata = {
                            .need_updating = false,
                            .terminal_node = false,
                            .parent_on_left = 1,
                            .parent_on_top = 1,
                        },
                        .tp_offset = -1,
                        .tp_x = -1,
                        .tp_y = -1,
                        .parent = node,
                        .childNW = NULL,
                        .childNE = NULL,
                        .childSW = NULL,
                        .childSE = NULL,
                    };
                }
                node = node->childSE;
            } else {
                // printf("SW ");fflush(stdout);
                if (!node->childSW) {
                    node->childSW = (TerrainTreeNode *) malloc(sizeof(TerrainTreeNode));
                    *node->childSW = (TerrainTreeNode) {
                        .metadata = {
                            .need_updating = false,
                            .terminal_node = false,
                            .parent_on_left = 0,
                            .parent_on_top = 1,
                        },
                        .tp_offset = -1,
                        .tp_x = -1,
                        .tp_y = -1,
                        .parent = node,
                        .childNW = NULL,
                        .childNE = NULL,
                        .childSW = NULL,
                        .childSE = NULL,
                    };
                }
                node = node->childSW;
            }
        } else {
            if (!left) {
                // printf("NE ");fflush(stdout);
                if (!node->childNE) {
                    node->childNE = (TerrainTreeNode *) malloc(sizeof(TerrainTreeNode));
                    *node->childNE = (TerrainTreeNode) {
                        .metadata = {
                            .need_updating = false,
                            .terminal_node = false,
                            .parent_on_left = 1,
                            .parent_on_top = 0,
                        },
                        .tp_offset = -1,
                        .tp_x = -1,
                        .tp_y = -1,
                        .parent = node,
                        .childNW = NULL,
                        .childNE = NULL,
                        .childSW = NULL,
                        .childSE = NULL,
                    };
                }
                node = node->childNE;
            } else {
                // printf("NW ");fflush(stdout);
                if (!node->childNW) {
                    node->childNW = (TerrainTreeNode *) malloc(sizeof(TerrainTreeNode));
                    *node->childNW = (TerrainTreeNode) {
                        .metadata = {
                            .need_updating = false,
                            .terminal_node = false,
                            .parent_on_left = 0,
                            .parent_on_top = 0,
                        },
                        .tp_offset = -1,
                        .tp_x = -1,
                        .tp_y = -1,
                        .parent = node,
                        .childNW = NULL,
                        .childNE = NULL,
                        .childSW = NULL,
                        .childSE = NULL,
                    };
                }
                node = node->childNW;
            }
        }
    }
    /* First visit to this leaf: bind it to its terrain pixel. */
    if (node->tp_offset == -1) {
        node->tp_offset = y * TERRAIN_SIZE + x;
        node->tp_x = x;
        node->tp_y = y;
        node->metadata.terminal_node = true;
    }
    return node;
}
/* Read the terrain pixel at (x, y); out-of-bounds coordinates yield the
 * caller-supplied `edge` sentinel instead. */
TerrainPixel terrain_get_pixel(int x, int y, TerrainPixel edge) {
    bool inside = x >= 0 && x < TERRAIN_SIZE && y >= 0 && y < TERRAIN_SIZE;
    return inside ? TERRAIN[y * TERRAIN_SIZE + x] : edge;
}
/* Write a terrain pixel and, when should_update is set, mark the 3x3
 * neighbourhood dirty so the quadtree revisits it next pass.
 * Out-of-bounds writes are silently ignored. */
void terrain_set_pixel(int x, int y, TerrainPixel terrain_pixel, bool should_update) {
    if (x < 0 || y < 0 || x >= TERRAIN_SIZE || y >= TERRAIN_SIZE) return;
    TERRAIN[y * TERRAIN_SIZE + x] = terrain_pixel;
    if (should_update)
        for (int tx = x - 1; tx <= x + 1; ++tx) {
            for (int ty = y - 1; ty <= y + 1; ++ty) {
                if (tx < 0 || ty < 0 || tx >= TERRAIN_SIZE || ty >= TERRAIN_SIZE) continue;
                TerrainPixel *tp = &TERRAIN[ty * TERRAIN_SIZE + tx];
                /* Only dynamic materials react to neighbour changes; empty
                 * cells and static dirt never need re-simulation. */
                if (tp->type != TERRAIN_NONE_TYPE && tp->type != TERRAIN_DIRT_TYPE) {
                    TerrainTreeNode *node = terrain_get_node_at(tx, ty);
                    if (node != NULL) {
                        tp->needs_update |= should_update;
                        node->metadata.need_updating = tp->needs_update;
                        /* Propagate the dirty flag up to the root, stopping
                         * at the first ancestor already marked. */
                        node = node->parent;
                        while (node != NULL && !node->metadata.need_updating) {
                            node->metadata.need_updating = 1;
                            node = node->parent;
                        }
                    }
                }
            }
        }
}
/* Re-derive the quadtree's need_updating flags from the pixel buffer after a
 * simulation pass, and prune subtrees that are fully quiescent.
 *
 * Terminal nodes copy needs_update from their pixel (and clear the pixel's
 * per-pass flags); dirty interior nodes recurse and OR their children's
 * flags; a clean interior node is detached from its parent and freed. */
void terrain_refresh_quadtree(TerrainTreeNode *node) {
    if (!node) return;
    if (node->metadata.terminal_node) {
        TerrainPixel *tp = &TERRAIN[node->tp_offset];
        node->metadata.need_updating = tp->needs_update != 0;
        tp->has_moved = false;
        tp->needs_update = 0;
    } else {
        if (node->metadata.need_updating) {
            terrain_refresh_quadtree(node->childNW);
            terrain_refresh_quadtree(node->childNE);
            terrain_refresh_quadtree(node->childSW);
            terrain_refresh_quadtree(node->childSE);
            node->metadata.need_updating = (node->childNW && node->childNW->metadata.need_updating) ||
                                           (node->childNE && node->childNE->metadata.need_updating) ||
                                           (node->childSW && node->childSW->metadata.need_updating) ||
                                           (node->childSE && node->childSE->metadata.need_updating);
        } else if (node->parent) {
            /* Detach from the parent before freeing so the parent never keeps
             * a dangling pointer; metadata.parent_on_* encode which child
             * slot this node occupies (see terrain_get_node_at). */
            if (node->metadata.parent_on_top) {
                if (node->metadata.parent_on_left) {
                    node->parent->childSE = NULL;
                } else {
                    node->parent->childSW = NULL;
                }
            } else {
                if (node->metadata.parent_on_left) {
                    node->parent->childNE = NULL;
                } else {
                    node->parent->childNW = NULL;
                }
            }
            destroy_terrain_tree(node);
            /* (dead store "node = NULL;" removed -- it assigned the local
             * parameter after its final use and had no effect) */
        }
    }
}
/* Decide the initial material for pixel (x, y) from an OpenSimplex noise
 * field sampled with the y axis stretched 2x. Returns TERRAIN_DIRT where the
 * noise falls below a height-dependent threshold, otherwise TERRAIN_NONE.
 *
 * Fixes vs the original: removed the unreachable "return TERRAIN_SAND;"
 * after "return TERRAIN_DIRT;", the unused sx_n local, and the block of
 * commented-out threshold experiments. */
static TerrainPixel terrain_generator_dirt(int x, int y, fnl_state noise) {
    double sx = x;
    double sy = y * 2;                     // stretch the noise field vertically
    double sy_n = sy / (TERRAIN_SIZE * 2); // vertical position, normalized to [0..1]
    /* Outer 5% bands (top and bottom of the map) get threshold 2, which no
     * noise sample exceeds, forcing solid dirt; the interior uses -0.6. */
    double threshold = fabs(sy_n - 0.5) > 0.45 ? 2 : -0.6;
    double noise_shift = 0; // Positive => More common blobs. Negative => Less common blobs.
    threshold += noise_shift;
    if (threshold <= -1 || fnlGetNoise2D(&noise, sx, sy) > threshold) return TERRAIN_NONE;
    return TERRAIN_DIRT;
}
/* Second generation pass: carve sand pockets out of already-generated dirt.
 * A pixel can only become sand when it is non-empty AND fully supported from
 * below (all three cells underneath are non-empty) -- loose sand would
 * immediately fall. Returns the unchanged pixel otherwise.
 *
 * Fixes vs the original: removed the unused sx_n/sy_n locals, the redundant
 * double parentheses around the support test, and the block of commented-out
 * threshold experiments. */
static TerrainPixel terrain_generator_sand(int x, int y, fnl_state noise) {
    TerrainPixel initial_terrain = terrain_get_pixel(x, y, TERRAIN_NONE);
    if (initial_terrain.type == TERRAIN_NONE_TYPE ||
        terrain_get_pixel(x - 1, y + 1, TERRAIN_NONE).type == TERRAIN_NONE_TYPE ||
        terrain_get_pixel(x + 1, y + 1, TERRAIN_NONE).type == TERRAIN_NONE_TYPE ||
        terrain_get_pixel(x + 0, y + 1, TERRAIN_NONE).type == TERRAIN_NONE_TYPE)
        return initial_terrain;
    double sx = x;
    double sy = y * 2; // stretch the noise field vertically, as in the dirt pass
    double threshold = -0.875; // fixed density; higher => more sand
    double noise_shift = 0; // Positive => More common blobs. Negative => Less common blobs.
    threshold -= noise_shift;
    if (threshold <= -1 || fnlGetNoise2D(&noise, sx, sy) > threshold) return initial_terrain;
    return TERRAIN_SAND;
}
/* (Re)generate the whole terrain from `seed`: reset the quadtree root,
 * fill the pixel buffer with noise-driven dirt, then carve sand pockets
 * bottom-up (so falling-sand marking sees supported cells first). */
void terrain_generate(int seed) {
    /* Tear down any quadtree left from a previous generation. This is done
     * unconditionally: the old "if (childNW != NULL)" guard leaked
     * lazily-created sibling subtrees whenever NW happened to be absent.
     * destroy_terrain_tree tolerates NULL children and never frees the
     * static root itself. */
    destroy_terrain_tree(&terrain_tree_root);
    terrain_tree_root = (TerrainTreeNode) {
        .metadata = {
            .need_updating = true,
            .terminal_node = false,
            .parent_on_left = 0,
            .parent_on_top = 0,
        },
        .tp_offset = -1,
        .tp_x = -1,
        .tp_y = -1,
        .parent = NULL,
        .childNW = NULL,
        .childNE = NULL,
        .childSW = NULL,
        .childSE = NULL,
    };
    fnl_state dirt_noise = fnlCreateState();
    dirt_noise.seed = seed;
    dirt_noise.noise_type = FNL_NOISE_OPENSIMPLEX2;
    dirt_noise.gain = 0.5f;
    dirt_noise.octaves = 1;
    dirt_noise.frequency = 0.0025f;
    dirt_noise.fractal_type = FNL_FRACTAL_NONE;
    /* Derive the sand seed from the terrain seed so generation is
     * reproducible but the two noise fields are decorrelated. */
    xor_srand(seed);
    int sand_seed = xor_rand_int32();
    fnl_state sand_noise = fnlCreateState();
    sand_noise.seed = sand_seed;
    sand_noise.noise_type = FNL_NOISE_OPENSIMPLEX2;
    sand_noise.gain = 0.5f;
    sand_noise.octaves = 1;
    sand_noise.frequency = 0.005f;
    sand_noise.fractal_type = FNL_FRACTAL_NONE;
    /* BUG FIX: clear the whole pixel buffer. The original multiplied by
     * sizeof(byte) and so zeroed only the first TERRAIN_PIXELS bytes of an
     * array of TerrainPixel elements. */
    memset(TERRAIN, 0, TERRAIN_PIXELS * sizeof TERRAIN[0]);
    for (int y = 0; y < TERRAIN_SIZE; ++y) {
        for (int x = 0; x < TERRAIN_SIZE; ++x) {
            TERRAIN[y * TERRAIN_SIZE + x] = terrain_generator_dirt(x, y, dirt_noise);
        }
    }
    /* Bottom-up so the sand pass sees final lower rows when testing support;
     * sand pixels are written through terrain_set_pixel to mark them (and
     * their neighbourhood) for simulation. */
    for (int y = TERRAIN_SIZE - 1; y >= 0; --y) {
        for (int x = 0; x < TERRAIN_SIZE; ++x) {
            TerrainPixel tp = terrain_generator_sand(x, y, sand_noise);
            if (tp.type == TERRAIN_SAND_TYPE) {
                terrain_set_pixel(x, y, tp, true);
            } else {
                TERRAIN[y * TERRAIN_SIZE + x] = tp;
            }
        }
    }
}
/* True when a falling sand grain may move into cell (x, y): the cell is
 * empty or holds a displaceable gas (exhaust or smoke). Out-of-bounds cells
 * read as TERRAIN_NONE and therefore count as occupiable. */
int sand_can_occupy(int x, int y) {
    const byte t = terrain_get_pixel(x, y, TERRAIN_NONE).type;
    return t == TERRAIN_NONE_TYPE
        || t == TERRAIN_XHST_TYPE
        || t == TERRAIN_SMKE_TYPE;
}
/* Advance one sand pixel. Candidate destinations are packed into a byte,
 * one bit per neighbour (bit 7 = NW ... bit 0 = SE, row-major around the
 * cell); sand only ever moves into the three cells below it, with a bias
 * toward falling straight down encoded in the weighted rand_sample tables. */
void update_sand_at(int x, int y) {
    TerrainPixel tp = terrain_get_pixel(x, y, TERRAIN_NONE);
    if (tp.type == TERRAIN_SAND_TYPE && !tp.has_moved) {
        byte open_spaces = 0x00;
        if (sand_can_occupy(x - 1, y - 1)) open_spaces |= 0b10000000u;
        if (sand_can_occupy(x + 0, y - 1)) open_spaces |= 0b01000000u;
        if (sand_can_occupy(x + 1, y - 1)) open_spaces |= 0b00100000u;
        if (sand_can_occupy(x - 1, y + 0)) open_spaces |= 0b00010000u;
        if (sand_can_occupy(x + 1, y + 0)) open_spaces |= 0b00001000u;
        if (sand_can_occupy(x - 1, y + 1)) open_spaces |= 0b00000100u;
        if (sand_can_occupy(x + 0, y + 1)) open_spaces |= 0b00000010u;
        if (sand_can_occupy(x + 1, y + 1)) open_spaces |= 0b00000001u;
        //Sand only falls down, so always consider the space above occupied
        open_spaces &= 0b00011111u;
        int dx = 0, dy = 0;
        /* Bits 2..0 are the three cells in the row below (SW, S, SE). */
        if (open_spaces & 0b00000111u) {
            dy = 1;
            //Move down
            /* Weighted choice: straight down is 3-4x more likely than a
             * diagonal when both are open. */
            switch (open_spaces & 0b00000111u) {
                case 0b111: dx = rand_sample((const int[]) {-1, 0, 0, 0, 1}, 5);
                    break;
                case 0b110: dx = rand_sample((const int[]) {-1, 0, 0, 0, 0}, 5);
                    break;
                case 0b011: dx = rand_sample((const int[]) {0, 0, 0, 0, 1}, 5);
                    break;
                case 0b010: dx = 0;
                    break;
                case 0b101: dx = rand_sample((const int[]) {-1, 1}, 2);
                    break;
                case 0b100: dx = -1;
                    break;
                case 0b001: dx = 1;
                    break;
                default: dx = 0;
            }
        }
        if (dx || dy) {
            if (dx < -1) dx = -1; else if (dx > 1) dx = 1;
            tp.has_moved = true;
            /* Swap with the destination cell (which may hold a gas) so the
             * displaced content is preserved; both cells get marked dirty. */
            TerrainPixel tmp = terrain_get_pixel(x + dx, y + dy, TERRAIN_NONE);
            terrain_set_pixel(x + dx, y + dy, tp, true);
            terrain_set_pixel(x, y, tmp, true);
        } else {
            /* Settled: clear the per-pass flags without dirtying neighbours. */
            tp.has_moved = false;
            tp.needs_update = false;
            terrain_set_pixel(x, y, tp, false);
        }
    }
}
/* Advance one exhaust pixel. Exhaust falls like sand (only into empty cells,
 * bits packed as in update_sand_at) but with two dissipation rules: a 5%
 * chance to vanish while moving, and immediate removal when stuck against
 * anything it cannot displace. */
void update_xhst_at(int x, int y) {
    TerrainPixel tp = terrain_get_pixel(x, y, TERRAIN_NONE);
    if (tp.type == TERRAIN_XHST_TYPE && !tp.has_moved) {
        //Exhaust falls down, so always consider the space above occupied
        byte neighbors = 0b11100000u;
        if (terrain_get_pixel(x - 1, y - 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b10000000u;
        if (terrain_get_pixel(x + 0, y - 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b01000000u;
        if (terrain_get_pixel(x + 1, y - 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00100000u;
        if (terrain_get_pixel(x - 1, y + 0, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00010000u;
        if (terrain_get_pixel(x + 1, y + 0, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00001000u;
        if (terrain_get_pixel(x - 1, y + 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00000100u;
        if (terrain_get_pixel(x + 0, y + 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00000010u;
        if (terrain_get_pixel(x + 1, y + 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00000001u;
        byte open_spaces = ~neighbors;
        int dx = 0, dy = 0;
        /* Bits 2..0 are the three cells in the row below (SW, S, SE);
         * unlike sand, all open directions below are equally likely. */
        if (open_spaces & 0b00000111u) {
            dy = 1;
            //Move down
            switch (open_spaces & 0b00000111u) {
                case 0b111: dx = rand_sample((const int[]) {-1, 0, 1}, 3);
                    break;
                case 0b110: dx = rand_sample((const int[]) {-1, 0}, 2);
                    break;
                case 0b011: dx = rand_sample((const int[]) {0, 1}, 2);
                    break;
                case 0b010: dx = 0;
                    break;
                case 0b101: dx = rand_sample((const int[]) {-1, 1}, 2);
                    break;
                case 0b100: dx = -1;
                    break;
                case 0b001: dx = 1;
                    break;
                default: dx = 0;
            }
        }
        if (dx || dy) {
            if (dx < -1) dx = -1; else if (dx > 1) dx = 1;
            tp.has_moved = true;
            TerrainPixel tmp = terrain_get_pixel(x + dx, y + dy, TERRAIN_NONE);
            /* 5% chance the moving exhaust dissipates instead of arriving. */
            if (xor_rand_double() < 0.95) {
                terrain_set_pixel(x + dx, y + dy, tp, true);
            }
            terrain_set_pixel(x, y, tmp, true);
        } else {
            /* Stuck: vanish as soon as any neighbour is something exhaust
             * cannot displace (solid terrain). */
            if (!sand_can_occupy(x - 1, y - 1) ||
                !sand_can_occupy(x + 0, y - 1) ||
                !sand_can_occupy(x + 1, y - 1) ||
                !sand_can_occupy(x - 1, y + 0) ||
                !sand_can_occupy(x + 1, y + 0) ||
                !sand_can_occupy(x - 1, y + 1) ||
                !sand_can_occupy(x + 0, y + 1) ||
                !sand_can_occupy(x + 1, y + 1))
                terrain_set_pixel(x, y, TERRAIN_NONE, false);
        }
    }
}
/* True when smoke may move into cell (x, y). The occupancy rule is
 * identical to sand's (empty, exhaust, or smoke), so delegate instead of
 * duplicating the test -- the previous body was a byte-for-byte copy of
 * sand_can_occupy. */
int smke_can_occupy(int x, int y) {
    return sand_can_occupy(x, y);
}
/* Advance one smoke pixel. Smoke drifts sideways or rises: the candidate
 * moves are the two horizontal neighbours and the three cells above (never
 * below). The direction is drawn from a fixed 6-slot table in which every
 * blocked direction collapses onto slot 0 ("stay"), so the probability of
 * staying put grows with the number of blocked directions. This computed
 * table reproduces the original hand-unrolled 32-case switch exactly
 * (case 0b00001 -> {0,1,0,0,0,0}, ..., case 0b11111 -> {0,1,2,3,4,5}),
 * including consuming exactly one rand_sample call per step. */
void update_smke_at(int x, int y) {
    TerrainPixel tp = terrain_get_pixel(x, y, TERRAIN_NONE);
    if (tp.type == TERRAIN_SMKE_TYPE && !tp.has_moved) {
        /* Smoke rises, so always consider the three cells below occupied.
         * Bit layout (matching update_sand_at): bit 7 = NW ... bit 0 = SE. */
        byte neighbors = 0b00000111u;
        if (terrain_get_pixel(x - 1, y - 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b10000000u;
        if (terrain_get_pixel(x + 0, y - 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b01000000u;
        if (terrain_get_pixel(x + 1, y - 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00100000u;
        if (terrain_get_pixel(x - 1, y + 0, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00010000u;
        if (terrain_get_pixel(x + 1, y + 0, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00001000u;
        if (terrain_get_pixel(x - 1, y + 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00000100u;
        if (terrain_get_pixel(x + 0, y + 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00000010u;
        if (terrain_get_pixel(x + 1, y + 1, TERRAIN_NONE).type != TERRAIN_NONE_TYPE) neighbors |= 0b00000001u;
        byte open_spaces = ~neighbors;
        int i = 0;
        if (open_spaces & 0b11111000u) {
            /* Slot k in 1..5 corresponds to bit (k + 2) of open_spaces
             * (right, left, up-right, up, up-left); a blocked direction
             * leaves its slot at 0, adding weight to "stay". */
            int options[6] = {0, 0, 0, 0, 0, 0};
            for (int k = 1; k <= 5; ++k) {
                if (open_spaces & (0x08u << (k - 1))) options[k] = k;
            }
            i = rand_sample(options, 6);
        }
        int dx = ((const int[]) {0, 1, -1, 1, 0, -1})[i];
        int dy = ((const int[]) {0, 0, 0, -1, -1, -1})[i];
        if (dx || dy) {
            if (dx < -1) dx = -1; else if (dx > 1) dx = 1;
            tp.has_moved = true;
            TerrainPixel tmp = terrain_get_pixel(x + dx, y + dy, TERRAIN_NONE);
            /* 1% chance the moving smoke dissipates instead of arriving. */
            if (xor_rand_double() < 0.99) {
                terrain_set_pixel(x + dx, y + dy, tp, true);
            }
            terrain_set_pixel(x, y, tmp, true);
        } else {
            /* Stuck: dissipate when touching anything smoke cannot occupy,
             * or with 1% probability even in open air. */
            if (!smke_can_occupy(x - 1, y - 1) ||
                !smke_can_occupy(x + 0, y - 1) ||
                !smke_can_occupy(x + 1, y - 1) ||
                !smke_can_occupy(x - 1, y + 0) ||
                !smke_can_occupy(x + 1, y + 0) ||
                !smke_can_occupy(x - 1, y + 1) ||
                !smke_can_occupy(x + 0, y + 1) ||
                !smke_can_occupy(x + 1, y + 1) ||
                xor_rand_double() > 0.99)
                terrain_set_pixel(x, y, TERRAIN_NONE, false);
            else
                terrain_set_pixel(x, y, tp, true);
        }
    }
}
/* Brute-force fallback pass: update every sand and exhaust pixel, scanning
 * rows bottom to top. Columns are staggered -- the expression
 * (x*2) % TERRAIN_SIZE + (x*2) / TERRAIN_SIZE visits all even columns
 * first, then all odd ones -- to reduce re-updating a grain that just moved
 * sideways within the same row sweep. */
void terrain_update_bottom_up() {
    //FIXME: Need to move from the bottom up to make convincing falling sand effect,
    // and stagger the columns to avoid updating the same particle multiple times.
    // Maybe we should keep track of which particles still need to be updated?
    // We should *definitely* switch to using a quadtree to reduce the number of pixel tests.
    for (int y = TERRAIN_SIZE - 1; y >= 0; --y) {
        for (int x = 0; x < TERRAIN_SIZE; ++x) {
            update_sand_at((x * 2) % TERRAIN_SIZE + (x * 2) / TERRAIN_SIZE, y);
            update_xhst_at((x * 2) % TERRAIN_SIZE + (x * 2) / TERRAIN_SIZE, y);
        }
    }
}
/* Quadtree-driven update for falling materials (sand and exhaust).
 * Only subtrees flagged need_updating are visited. At each interior node the
 * within-pair left/right visiting order is randomized (one xor_rand_double
 * draw per dirty interior node, as before) so grains show no horizontal
 * bias; south children are always visited after north ones here. */
void terrain_update_quadtree_bottom_up(TerrainTreeNode *node) {
    if (!node || !node->metadata.need_updating) {
        return;
    }
    if (node->metadata.terminal_node) {
        update_sand_at(node->tp_x, node->tp_y);
        update_xhst_at(node->tp_x, node->tp_y);
        return;
    }
    TerrainTreeNode *nw_first[4] = {node->childNW, node->childNE, node->childSW, node->childSE};
    TerrainTreeNode *ne_first[4] = {node->childNE, node->childNW, node->childSE, node->childSW};
    TerrainTreeNode **order = (xor_rand_double() < 0.5) ? nw_first : ne_first;
    for (int i = 0; i < 4; ++i) {
        terrain_update_quadtree_bottom_up(order[i]);
    }
}
void terrain_update_quadtree_top_down(TerrainTreeNode *node) {
    // Depth-first pass over the quadtree for the "rising" particle update
    // (smoke). Subtrees whose need_updating flag is clear are skipped.
    if (!node || !node->metadata.need_updating) return;
    if (node->metadata.terminal_node) {
        // Leaf: holds a single terrain pixel.
        update_smke_at(node->tp_x, node->tp_y);
        return;
    }
    // One coin flip per interior node picks between two child orderings so
    // the simulation has no systematic left/right bias.
    TerrainTreeNode *children[4];
    if (xor_rand_double() < 0.5) {
        children[0] = node->childSW;
        children[1] = node->childSE;
        children[2] = node->childNW;
        children[3] = node->childNE;
    } else {
        children[0] = node->childSE;
        children[1] = node->childSW;
        children[2] = node->childNE;
        children[3] = node->childNW;
    }
    for (int i = 0; i < 4; ++i) {
        terrain_update_quadtree_top_down(children[i]);
    }
}
void terrain_update_top_down() {
    // Sweep rows top-to-bottom for the rising particles (smoke); columns use
    // the same staggered even-then-odd order as the bottom-up pass.
    // FIXME: x/y are not staggered against each other, so a particle can
    //        still be visited twice in one pass; a quadtree walk would also
    //        cut the number of per-pixel tests.
    // NOTE(review): rows are processed in parallel while particles may move
    //        between rows, which is a data race on the terrain grid —
    //        presumably tolerable for a visual effect, but confirm it is
    //        intentional.
    #pragma omp parallel for
    for (int row = 0; row < TERRAIN_SIZE; ++row) {
        for (int col = 0; col < TERRAIN_SIZE; ++col) {
            update_smke_at((col * 2) % TERRAIN_SIZE + (col * 2) / TERRAIN_SIZE, row);
        }
    }
}
double terrain_get_pixel_solidness(int x, int y, double edge_val, double dynamic_mod) {
    // Returns how "solid" the pixel at (x, y) is, in [0, 1].
    // Out-of-bounds coordinates report the caller-supplied edge solidness.
    // NOTE(review): dynamic_mod is currently unused — presumably intended to
    // modulate the solidness of moving particles; confirm before removing.
    if (x < 0 || y < 0 || x >= TERRAIN_SIZE || y >= TERRAIN_SIZE) return edge_val;
    TerrainPixel tp = terrain_get_pixel(x, y, TERRAIN_NONE);
    switch (tp.type) {
    case TERRAIN_DIRT_TYPE:
        // Dirt is always solid.
        return 1.0;
    case TERRAIN_SAND_TYPE:
        // Moving sand is not solid; settled sand is fully solid.
        // TODO: Falling sand should be less solid than static sand.
        return tp.has_moved ? 0.0 : 1.0;
    case TERRAIN_XHST_TYPE: // Exhaust is never solid.
    case TERRAIN_SMKE_TYPE: // Smoke is never solid.
    default:                // Empty / unknown pixels are not solid.
        return 0.0;
    }
}
void terrain_update() {
    // One simulation tick: rebuild the dirty-region quadtree, then run the
    // rising particles (smoke) top-down and the falling particles
    // (sand/exhaust) bottom-up over the nodes flagged as needing updates.
    terrain_refresh_quadtree(&terrain_tree_root);
    terrain_update_quadtree_top_down(&terrain_tree_root);
    terrain_update_quadtree_bottom_up(&terrain_tree_root);
}
|
if-clause.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * OpenMP teaching example: sums a[0..n-1] in parallel, with the parallel
 * region enabled only when n > 4 (the if clause).
 *
 * Usage: prog <iteraciones> <nthreads>
 */
int main(int argc, char **argv)
{
  int i, n = 20, tid, x;
  int a[20], suma = 0, sumalocal;
  if (argc < 3) {
    fprintf(stderr,"[ERROR]-Faltan parametros: iteraciones nthreads\n");
    exit(-1);
  }
  /* Clamp the iteration count to the fixed array size, and reject
   * non-positive values (atoi returns 0 on malformed input). */
  n = atoi(argv[1]);
  if (n > 20) n = 20;
  if (n < 1) {
    fprintf(stderr, "[ERROR]-iteraciones must be a positive integer\n");
    exit(-1);
  }
  /* BUGFIX: num_threads() requires a strictly positive argument; atoi on a
   * malformed argument yields 0, which is invalid OpenMP. Validate it. */
  x = atoi(argv[2]);
  if (x < 1) {
    fprintf(stderr, "[ERROR]-nthreads must be a positive integer\n");
    exit(-1);
  }
  for (i = 0; i < n; i++) {
    a[i] = i;
  }
  /* The region will not run in parallel when n <= 4 (if clause). */
  #pragma omp parallel if(n>4) default(none) \
          private(sumalocal,tid) shared(a,suma,n) num_threads(x)
  {
    sumalocal = 0;
    tid = omp_get_thread_num();
    #pragma omp for private(i) schedule(static) nowait
    for (i = 0; i < n; i++) {
      sumalocal += a[i];
      printf(" thread %d suma de a[%d]=%d sumalocal=%d \n",
             tid, i, a[i], sumalocal);
    }
    /* Each thread folds its partial sum into the shared total atomically. */
    #pragma omp atomic
    suma += sumalocal;
    /* Wait for all partial sums before the master prints the result. */
    #pragma omp barrier
    #pragma omp master
    printf("thread master=%d imprime suma=%d\n", tid, suma);
  }
  return 0;
}
|
displacement_lagrangemultiplier_residual_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierResidualContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor (parameters)
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param RotRatioTolerance Relative tolerance for rotation residual error
* @param RotAbsTolerance Absolute tolerance for rotation residual error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierResidualContactCriteria(
const TDataType DispRatioTolerance,
const TDataType DispAbsTolerance,
const TDataType RotRatioTolerance,
const TDataType RotAbsTolerance,
const TDataType LMRatioTolerance,
const TDataType LMAbsTolerance,
const bool EnsureContact = false,
const bool PrintingOutput = false
)
: BaseType()
{
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
// The displacement residual
mDispRatioTolerance = DispRatioTolerance;
mDispAbsTolerance = DispAbsTolerance;
// The rotation residual
mRotRatioTolerance = RotRatioTolerance;
mRotAbsTolerance = RotAbsTolerance;
// The normal contact residual
mLMRatioTolerance = LMRatioTolerance;
mLMAbsTolerance = LMAbsTolerance;
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
: BaseType()
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
//* Copy constructor.
DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
,mRotRatioTolerance(rOther.mRotRatioTolerance)
,mRotAbsTolerance(rOther.mRotAbsTolerance)
,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm)
,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm)
,mLMRatioTolerance(rOther.mLMRatioTolerance)
,mLMAbsTolerance(rOther.mLMAbsTolerance)
,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm)
,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierResidualContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
// Initialize
TDataType disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0);
// First iterator
const auto it_dof_begin = rDofSet.begin();
// Auxiliar values
std::size_t dof_id = 0;
TDataType residual_dof_value = 0.0;
// The number of active dofs
const std::size_t number_active_dofs = rb.size();
// Auxiliar displacement DoF check
const std::function<bool(const VariableData&)> check_without_rot =
[](const VariableData& rCurrVar) -> bool {return true;};
const std::function<bool(const VariableData&)> check_with_rot =
[](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;
// Loop over Dofs
#pragma omp parallel for firstprivate(dof_id, residual_dof_value) reduction(+:disp_residual_solution_norm,rot_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,rot_dof_num,lm_dof_num)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
dof_id = it_dof->EquationId();
// Check dof id is solved
if (dof_id < number_active_dofs) {
if (mActiveDofs[dof_id] == 1) {
residual_dof_value = rb[dof_id];
const auto& r_curr_var = it_dof->GetVariable();
if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
lm_residual_solution_norm += std::pow(residual_dof_value, 2);
++lm_dof_num;
} else if ((*p_check_disp)(r_curr_var)) {
disp_residual_solution_norm += std::pow(residual_dof_value, 2);
++disp_dof_num;
} else { // We will assume is rotation dof
KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
rot_residual_solution_norm += std::pow(residual_dof_value, 2);
++rot_dof_num;
}
}
}
}
mDispCurrentResidualNorm = disp_residual_solution_norm;
mRotCurrentResidualNorm = rot_residual_solution_norm;
mLMCurrentResidualNorm = lm_residual_solution_norm;
TDataType residual_disp_ratio = 1.0;
TDataType residual_rot_ratio = 1.0;
TDataType residual_lm_ratio = 1.0;
// We initialize the solution
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
residual_disp_ratio = 1.0;
residual_lm_ratio = 1.0;
if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 1.0 : rot_residual_solution_norm;
residual_rot_ratio = 1.0;
}
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the displacements
residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
// We calculate the ratio of the rotations
residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm;
// We calculate the ratio of the LM
residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
// We calculate the absolute norms
const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
const TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num;
const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;
// The process info of the model part
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
} else {
Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
}
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
}
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
}
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
}
}
}
r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;
// We check if converged
const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true;
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);
if (disp_converged && rot_converged && lm_converged ) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& Table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
Table << BOLDFONT(FGRN(" Achieved"));
else
Table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
}
}
return false;
}
} else // In this case all the displacements are imposed!
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart) override
{
BaseType::mConvergenceCriteriaIsInitialized = true;
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
r_table.AddColumn("DP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
r_table.AddColumn("RT RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
}
r_table.AddColumn("LM RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
r_table.AddColumn("CONVERGENCE", 15);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true);
}
// Check rotation dof
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& rA,
        const TSystemVectorType& rDx,
        const TSystemVectorType& rb
        ) override
    {
        // Initialize flag: reset the reference residual so the first
        // PostCriteria call of this step re-captures the initial norms
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
        // Filling mActiveDofs when MPC exist (entries are 1 for active DoFs;
        // PostCriteria skips the inactive ones)
        ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
    }
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
    Parameters GetDefaultParameters() const override
    {
        // Tolerance groups:
        //   residual_*          -> displacement residual
        //   rotation_residual_* -> rotation residual
        //   contact_residual_*  -> Lagrange-multiplier (contact) residual
        Parameters default_parameters = Parameters(R"(
        {
            "name"                                 : "displacement_lagrangemultiplier_residual_contact_criteria",
            "ensure_contact"                       : false,
            "print_convergence_criterion"          : false,
            "residual_relative_tolerance"          : 1.0e-4,
            "residual_absolute_tolerance"          : 1.0e-9,
            "rotation_residual_relative_tolerance" : 1.0e-4,
            "rotation_residual_absolute_tolerance" : 1.0e-9,
            "contact_residual_relative_tolerance"  : 1.0e-4,
            "contact_residual_absolute_tolerance"  : 1.0e-9
        })");
        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "displacement_lagrangemultiplier_residual_contact_criteria";
}
///@}
///@name Operations
///@{
///@}
///@name Acces
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
// The displacement residual
mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
// The rotation residual
mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble();
mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble();
// The contact residual
mLMRatioTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble();
// Set local flags
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual
TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual
TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual
TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual
TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual
TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM residual
TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM residual
TDataType mLMInitialResidualNorm; /// The reference norm of the LM residual
TDataType mLMCurrentResidualNorm; /// The current norm of the LM residual
std::vector<int> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
|
oskar_cross_correlate_point_time_smearing_scalar_omp.c | /*
* Copyright (c) 2014-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <math.h>
#include "correlate/private_correlate_functions_inline.h"
#include "correlate/oskar_cross_correlate_point_time_smearing_scalar_omp.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Single precision. */
/*
 * Single-precision, scalar (unpolarised) cross-correlation of point sources
 * with bandwidth and time-average smearing.
 *
 * For every baseline (SP, SQ) with SP > SQ, the per-source products of the
 * two stations' Jones scalars and the source flux are summed (weighted by
 * the smearing factors) and accumulated into vis[]. The outer station loop
 * is parallelised with OpenMP; each (SP, SQ) pair is handled by exactly one
 * iteration, so — assuming the baseline index is unique per pair — no two
 * threads write the same vis[] entry.
 */
void oskar_cross_correlate_point_time_smearing_scalar_omp_f(int num_sources,
        int num_stations, const float2* jones, const float* source_I,
        const float* source_l, const float* source_m, const float* source_n,
        const float* station_u, const float* station_v,
        const float* station_w, const float* station_x,
        const float* station_y, float uv_min_lambda,
        float uv_max_lambda, float inv_wavelength, float frac_bandwidth,
        float time_int_sec, float gha0_rad, float dec0_rad, float2* vis)
{
    int SQ;
    /* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
    for (SQ = 0; SQ < num_stations; ++SQ)
    {
        int SP, i;
        const float2 *station_p, *station_q;
        /* Pointer to source vector for station q. */
        station_q = &jones[SQ * num_sources];
        /* Loop over baselines for this station. */
        for (SP = SQ + 1; SP < num_stations; ++SP)
        {
            float uv_len, uu, vv, ww, uu2, vv2, uuvv, du, dv, dw;
            /* "guard" is passed to the accumulate helper alongside the sum —
             * presumably a compensated-summation (Kahan) term for the
             * single-precision accumulation; confirm against the inline
             * helper's definition. */
            float2 sum, guard;
            sum.x = 0.0f;
            sum.y = 0.0f;
            guard.x = 0.0f;
            guard.y = 0.0f;
            /* Pointer to source vector for station p. */
            station_p = &jones[SP * num_sources];
            /* Get common baseline values. */
            oskar_evaluate_baseline_terms_inline_f(station_u[SP],
                    station_u[SQ], station_v[SP], station_v[SQ],
                    station_w[SP], station_w[SQ], inv_wavelength,
                    frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv);
            /* Apply the baseline length filter. */
            if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
                continue;
            /* Compute the deltas for time-average smearing. */
            oskar_evaluate_baseline_deltas_inline_f(station_x[SP],
                    station_x[SQ], station_y[SP], station_y[SQ],
                    inv_wavelength, time_int_sec, gha0_rad, dec0_rad,
                    &du, &dv, &dw);
            /* Loop over sources. */
            for (i = 0; i < num_sources; ++i)
            {
                float l, m, n, r1, r2;
                /* Get source direction cosines. */
                l = source_l[i];
                m = source_m[i];
                n = source_n[i];
                /* Compute bandwidth- and time-smearing terms. */
                r1 = oskar_sinc_f(uu * l + vv * m + ww * (n - 1.0f));
                r2 = oskar_evaluate_time_smearing_f(du, dv, dw, l, m, n);
                r1 *= r2;
                /* Accumulate baseline visibility response for source. */
                oskar_accumulate_baseline_visibility_for_source_scalar_inline_f(
                        &sum, i, source_I, station_p, station_q, r1, &guard);
            }
            /* Add result to the baseline visibility. */
            i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
            vis[i].x += sum.x;
            vis[i].y += sum.y;
        }
    }
}
/* Double precision. */
/*
 * Double-precision, scalar (unpolarised) cross-correlation of point sources
 * with bandwidth and time-average smearing. Mirrors the single-precision
 * version above.
 *
 * NOTE(review): unlike the single-precision version, no "guard" term is used
 * in the accumulation here — presumably double precision is considered
 * accurate enough without compensated summation; confirm this is intentional.
 */
void oskar_cross_correlate_point_time_smearing_scalar_omp_d(int num_sources,
        int num_stations, const double2* jones, const double* source_I,
        const double* source_l, const double* source_m, const double* source_n,
        const double* station_u, const double* station_v,
        const double* station_w, const double* station_x,
        const double* station_y, double uv_min_lambda,
        double uv_max_lambda, double inv_wavelength, double frac_bandwidth,
        double time_int_sec, double gha0_rad, double dec0_rad, double2* vis)
{
    int SQ;
    /* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
    for (SQ = 0; SQ < num_stations; ++SQ)
    {
        int SP, i;
        const double2 *station_p, *station_q;
        /* Pointer to source vector for station q. */
        station_q = &jones[SQ * num_sources];
        /* Loop over baselines for this station (each (SP, SQ) pair with
         * SP > SQ is visited by exactly one outer iteration). */
        for (SP = SQ + 1; SP < num_stations; ++SP)
        {
            double uv_len, uu, vv, ww, uu2, vv2, uuvv, du, dv, dw;
            double2 sum;
            sum.x = 0.0;
            sum.y = 0.0;
            /* Pointer to source vector for station p. */
            station_p = &jones[SP * num_sources];
            /* Get common baseline values. */
            oskar_evaluate_baseline_terms_inline_d(station_u[SP],
                    station_u[SQ], station_v[SP], station_v[SQ],
                    station_w[SP], station_w[SQ], inv_wavelength,
                    frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv);
            /* Apply the baseline length filter. */
            if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
                continue;
            /* Compute the deltas for time-average smearing. */
            oskar_evaluate_baseline_deltas_inline_d(station_x[SP],
                    station_x[SQ], station_y[SP], station_y[SQ],
                    inv_wavelength, time_int_sec, gha0_rad, dec0_rad,
                    &du, &dv, &dw);
            /* Loop over sources. */
            for (i = 0; i < num_sources; ++i)
            {
                double l, m, n, r1, r2;
                /* Get source direction cosines. */
                l = source_l[i];
                m = source_m[i];
                n = source_n[i];
                /* Compute bandwidth- and time-smearing terms. */
                r1 = oskar_sinc_d(uu * l + vv * m + ww * (n - 1.0));
                r2 = oskar_evaluate_time_smearing_d(du, dv, dw, l, m, n);
                r1 *= r2;
                /* Accumulate baseline visibility response for source. */
                oskar_accumulate_baseline_visibility_for_source_scalar_inline_d(
                        &sum, i, source_I, station_p, station_q, r1);
            }
            /* Add result to the baseline visibility. */
            i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
            vis[i].x += sum.x;
            vis[i].y += sum.y;
        }
    }
}
#ifdef __cplusplus
}
#endif
|
second.c | #include <stdio.h>
#include <omp.h>
/* Demonstrate basic OpenMP runtime queries and a fixed-size parallel region.
 * Fix: removed the unused array A and the unused locals i, m, k (m was
 * assigned from omp_get_num_procs() but never read); behavior of the
 * program's output is unchanged. */
int main(){
    /* Disable dynamic thread adjustment so the omp_set_num_threads()
     * request below is honoured exactly. */
    omp_set_dynamic(0);
    printf("Max No. of threads: %d\n", omp_get_max_threads());
    omp_set_num_threads(6);
    /* Each of the requested threads prints its id and the team size. */
#pragma omp parallel
    printf("Hello from thread no. %d of total %d threads\n", omp_get_thread_num(), omp_get_num_threads());
    return 0;
}
nodal_two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: June 2018 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class NodalTwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategy);
/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
/// Node type (default is: Node<3>)
typedef Node<3> NodeType;
/// Geometry type (using with given NodeType)
typedef Geometry<NodeType> GeometryType;
typedef std::size_t SizeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/// Constructor taking a solver-settings object.
/// Delegates all member and sub-strategy setup to InitializeStrategy().
/// @param rModelPart    model part on which the strategy operates
/// @param rSolverConfig solver configuration (tolerances, linear solvers, ...)
NodalTwoStepVPStrategy(ModelPart &rModelPart,
                       SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
    InitializeStrategy(rSolverConfig);
}
/// Constructor building the auxiliary velocity and pressure strategies
/// explicitly from two linear solvers.
/// @param rModelPart            model part on which the strategy operates
/// @param pVelocityLinearSolver linear solver for the momentum (velocity) system
/// @param pPressureLinearSolver linear solver for the continuity (pressure) system
/// @param ReformDofSet          rebuild the DofSet after each step (remeshing)
/// @param VelTol                convergence tolerance on the velocity correction
/// @param PresTol               convergence tolerance on the pressure correction
/// @param MaxPressureIterations max predictor-corrector (non-linear) iterations
/// @param TimeOrder             1 = Backward Euler, 2 = BDF2
/// @param DomainSize            spatial dimension (2 or 3)
NodalTwoStepVPStrategy(ModelPart &rModelPart,
                       /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
                       typename TLinearSolver::Pointer pVelocityLinearSolver,
                       typename TLinearSolver::Pointer pPressureLinearSolver,
                       bool ReformDofSet = true,
                       double VelTol = 0.0001,
                       double PresTol = 0.0001,
                       int MaxPressureIterations = 1, // Only for predictor-corrector
                       unsigned int TimeOrder = 2,
                       unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
                                                      mVelocityTolerance(VelTol),
                                                      mPressureTolerance(PresTol),
                                                      mMaxPressureIter(MaxPressureIterations),
                                                      mDomainSize(DomainSize),
                                                      mTimeOrder(TimeOrder),
                                                      mReformDofSet(ReformDofSet)
{
    KRATOS_TRY;
    BaseType::SetEchoLevel(1);
    // Check that input parameters are reasonable and sufficient.
    this->Check();
    bool CalculateNormDxFlag = true;
    bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.
    // Additional Typedefs
    //typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent;
    typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
    //initializing fractional velocity solution step
    // Both sub-problems share a static incremental-update scheme: the time
    // discretization is handled at the nodal/element level, not by the scheme.
    typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
    typename SchemeType::Pointer pScheme;
    typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
    /* typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new IncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); */
    pScheme.swap(Temp);
    //CONSTRUCTION OF VELOCITY
    // Momentum (velocity) sub-strategy: nodal elimination builder + Gauss-Seidel
    // linear strategy around the velocity linear solver.
    BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
    /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
    this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
    this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
    vel_build->SetCalculateReactionsFlag(false);
    // Continuity (pressure) sub-strategy, built the same way around the
    // pressure linear solver.
    /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
    /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */
    BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuity<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
    /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */
    this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
    this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
    pressure_build->SetCalculateReactionsFlag(false);
    KRATOS_CATCH("");
}
/// Destructor. Empty body: the sub-strategies are held through
/// BaseType::Pointer smart pointers and release themselves.
virtual ~NodalTwoStepVPStrategy() {}
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
{
ierr = itEl->Check(rCurrentProcessInfo);
if (ierr != 0)
break;
}
for (ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond)
{
ierr = itCond->Check(rCurrentProcessInfo);
if (ierr != 0)
break;
}
return ierr;
KRATOS_CATCH("");
}
/// Advance the coupled velocity-pressure problem by one time step using a
/// predictor-corrector loop of momentum and continuity solves with nodal
/// integration. The iteration budget is enlarged during start-up and after
/// a time-step change; accelerations are computed once converged (or at the
/// last iteration).
/// @return NormDp — note it is never updated here, so the caller always
///         receives 0.0 (kept for interface compatibility).
double Solve() override
{
    // Initialize BDF2 coefficients
    ModelPart &rModelPart = BaseType::GetModelPart();
    this->SetTimeCoefficients(rModelPart.GetProcessInfo());
    double NormDp = 0.0;
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
    // bool momentumAlreadyConverged=false;
    // bool continuityAlreadyConverged=false;
    unsigned int maxNonLinearIterations = mMaxPressureIter;
    std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl;
    // Grant extra iterations right after a time-step change...
    if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
    {
        maxNonLinearIterations *= 2;
    }
    // ...and during the first and second batches of 10 time steps.
    if (currentTime < 10 * timeInterval)
    {
        if (BaseType::GetEchoLevel() > 1)
            std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
        maxNonLinearIterations *= 3;
    }
    if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
    {
        if (BaseType::GetEchoLevel() > 1)
            std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
        maxNonLinearIterations *= 2;
    }
    bool momentumConverged = true;
    bool continuityConverged = false;
    bool fixedTimeStep = false;
    /* boost::timer solve_step_time; */
    this->InitializeSolutionStep();
    for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
    {
        if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
            std::cout << "----- > iteration: " << it << std::endl;
        // First pass only: nodal volumes and elemental initialization.
        if (it == 0)
        {
            this->ComputeNodalVolume();
            this->InitializeNonLinearIterations();
        }
        this->CalcNodalStrainsAndStresses();
        // Momentum (velocity) solve, then refresh topology and nodal data
        // before the continuity solve.
        momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep);
        this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
        this->ComputeNodalVolume();
        this->InitializeNonLinearIterations();
        this->CalcNodalStrains();
        // Continuity (pressure) solve, skipped when the step was fixed.
        if (fixedTimeStep == false)
        {
            continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations);
        }
        // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
        // std::ofstream myfile;
        // myfile.open ("momentumConvergedIteration.txt",std::ios::app);
        // myfile << currentTime << "\t" << it << "\n";
        // myfile.close();
        // momentumAlreadyConverged=true;
        // }
        // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
        // std::ofstream myfile;
        // myfile.open ("continuityConvergedIteration.txt",std::ios::app);
        // myfile << currentTime << "\t" << it << "\n";
        // myfile.close();
        // continuityAlreadyConverged=true;
        // }
        // Compute accelerations once converged (needs it > 1) or when the
        // iteration budget is exhausted.
        if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
        {
            //this->ComputeErrorL2NormCaseImposedG();
            //this->ComputeErrorL2NormCasePoiseuille();
            this->CalculateAccelerations();
            // std::ofstream myfile;
            // myfile.open ("maxConvergedIteration.txt",std::ios::app);
            // myfile << currentTime << "\t" << it << "\n";
            // myfile.close();
        }
        if ((continuityConverged && momentumConverged) && it > 1)
        {
            rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
            rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
            std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl;
            break;
        }
    }
    if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
        std::cout << "Convergence tolerance not reached." << std::endl;
    // Optionally drop the DofSet so it is rebuilt next step (remeshing).
    if (mReformDofSet)
        this->Clear();
    /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
    return NormDp;
}
/// End-of-step hook. The stress/strain update is commented out, so this is
/// intentionally a no-op in the current implementation.
void FinalizeSolutionStep() override
{
    /* this->UpdateStressStrain(); */
}
/// One-time initialization: size and zero every nodal history variable used
/// by the nodal integration (stresses, strain rates, deformation gradients,
/// volumes, mesh sizes, free-surface areas, SFD neighbour data) and assign
/// the fluid material coefficients to each node. A node missing a required
/// variable is reported on stdout instead of aborting.
///
/// NOTE(review): here sizeSDFNeigh = neighb_nodes.size() * dimension, while
/// InitializeNodalVariablesForRemeshedDomain() uses
/// (neighb_nodes.size() + 1) * dimension — confirm which size is intended.
void Initialize() override
{
    std::cout << " Initialize in nodal_two_step_v_p_strategy" << std::endl;
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    unsigned int sizeStrains = 3 * (dimension - 1); // Voigt size: 3 in 2D, 6 in 3D
    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        unsigned int neighbourNodes = neighb_nodes.size();
        unsigned int sizeSDFNeigh = neighbourNodes * dimension;
        // Total Cauchy stress vector: resize to Voigt size and zero.
        if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
        {
            Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
            if (rNodalStress.size() != sizeStrains)
            {
                rNodalStress.resize(sizeStrains, false);
            }
            noalias(rNodalStress) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        // Deviatoric Cauchy stress vector.
        if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
        {
            Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
            if (rNodalStress.size() != sizeStrains)
            {
                rNodalStress.resize(sizeStrains, false);
            }
            noalias(rNodalStress) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        // Scalar accumulators start from zero.
        if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
        {
            itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
        }
        else
        {
            std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
        {
            itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
        }
        else
        {
            std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
        {
            itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
        }
        else
        {
            std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        // Shape-function-derivative data: one dimension-block per neighbour.
        if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
        {
            Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
            if (rNodalSFDneighbours.size() != sizeSDFNeigh)
            {
                rNodalSFDneighbours.resize(sizeSDFNeigh, false);
            }
            noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        // Spatial deformation-rate vector (Voigt size).
        if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
        {
            Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
            if (rSpatialDefRate.size() != sizeStrains)
            {
                rSpatialDefRate.resize(sizeStrains, false);
            }
            noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        // Deformation gradient F and its rate: dimension x dimension matrices.
        if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
        {
            Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
            if (rFgrad.size1() != dimension)
            {
                rFgrad.resize(dimension, dimension, false);
            }
            noalias(rFgrad) = ZeroMatrix(dimension, dimension);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
        {
            Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
            if (rFgradVel.size1() != dimension)
            {
                rFgradVel.resize(dimension, dimension, false);
            }
            noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
        }
        else
        {
            std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
        }
        // Store material coefficients (volumetric/deviatoric) on the node.
        this->AssignFluidMaterialToEachNode(itNode);
    }
    // }
}
/// Derive and store the per-node material coefficients used by the nodal
/// stress computation: DEVIATORIC_COEFFICIENT (the dynamic viscosity) and
/// VOLUMETRIC_COEFFICIENT (first Lame parameter from the time-integrated
/// bulk modulus: K*dt - 2/3 mu).
/// @param itNode iterator to the node to update
void AssignFluidMaterialToEachNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];

    // Deviatoric part: the nodal dynamic viscosity itself.
    const double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
    // Volumetric part: bulk modulus integrated over the time step.
    const double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    // First Lame parameter: lambda = K*dt - (2/3) mu.
    const double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0;

    itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame;
    itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff;
}
/// Accumulate each element's measure into the NODAL_VOLUME of its nodes.
/// Each node of a simplex receives an equal share: area/3 for triangles
/// (2D), volume/4 for tetrahedra (3D).
///
/// NOTE(review): the previous code partitioned the element array with
/// OpenMPUtils::CreatePartition(omp_get_max_threads(), ...) but the
/// enclosing "#pragma omp parallel" was commented out, so
/// OpenMPUtils::ThisThread() always returned 0 and ONLY the first
/// partition (1/num_threads of the elements) was visited whenever more
/// than one OpenMP thread was configured. The loop now visits every
/// element; the result is unchanged when running with one thread.
/// Restoring the parallel region would additionally require an atomic or
/// critical section around the shared nodal accumulation below.
void ComputeNodalVolume()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ElementsArrayType &pElements = rModelPart.Elements();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    for (typename ElementsArrayType::iterator itElem = pElements.begin(); itElem != pElements.end(); itElem++) //MSI: To be parallelized
    {
        Element::GeometryType &geometry = itElem->GetGeometry();

        // Equal share of the elemental measure for each of its nodes.
        double elementalVolume = 0;
        if (dimension == 2)
        {
            elementalVolume = geometry.Area() / 3.0;
        }
        else if (dimension == 3)
        {
            elementalVolume = geometry.Volume() * 0.25;
        }

        const unsigned int numNodes = geometry.size();
        for (unsigned int i = 0; i < numNodes; i++)
        {
            double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
            nodalVolume += elementalVolume;
        }
    }
}
/// Start-of-step hook: rebuild the nodal shape-function-derivative
/// neighbour bookkeeping (required after any remeshing).
void InitializeSolutionStep() override
{
    this->FillNodalSFDVector();
}
void FillNodalSFDVector()
{
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
// ModelPart::NodeIterator NodesBegin;
// ModelPart::NodeIterator NodesEnd;
// OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
// for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
// {
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
{
InitializeNodalVariablesForRemeshedDomain(itNode);
SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
}
}
/// Fill NODAL_SFD_NEIGHBOURS_ORDER with the node's own id in entry 0
/// followed by the ids of all its neighbour nodes.
/// @param itNode iterator to the node whose neighbour order is rebuilt
void SetNeighboursOrderToNode(ModelPart::NodeIterator itNode)
{
    NodeWeakPtrVectorType &rNeighbours = itNode->GetValue(NEIGHBOUR_NODES);
    // +1 because the node itself is stored as the first entry.
    const unsigned int numEntries = rNeighbours.size() + 1;

    Vector &rOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    if (rOrder.size() != numEntries)
        rOrder.resize(numEntries, false);
    noalias(rOrder) = ZeroVector(numEntries);

    rOrder[0] = itNode->Id();
    // Loop is naturally empty when the node has no neighbours.
    for (unsigned int k = 1; k < numEntries; k++)
        rOrder[k] = rNeighbours[k - 1].Id();
}
/// Reset (resize and zero) all nodal history variables of one node, used
/// when the surrounding mesh has just been rebuilt so that stale values
/// from the previous mesh cannot leak into the new step. Variables the
/// node does not store are silently skipped (unlike Initialize(), which
/// warns).
/// @param itNode iterator to the node to reset
void InitializeNodalVariablesForRemeshedDomain(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    unsigned int sizeStrains = 3 * (dimension - 1); // Voigt size: 3 in 2D, 6 in 3D
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    // +1 because the node itself is counted among its own neighbours here.
    unsigned int neighbourNodes = neighb_nodes.size() + 1;
    unsigned int sizeSDFNeigh = neighbourNodes * dimension;
    // Stress vectors (Voigt notation).
    if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
    {
        Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
        if (rNodalStress.size() != sizeStrains)
            rNodalStress.resize(sizeStrains, false);
        noalias(rNodalStress) = ZeroVector(sizeStrains);
    }
    if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
    {
        Vector &rNodalDevStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
        if (rNodalDevStress.size() != sizeStrains)
            rNodalDevStress.resize(sizeStrains, false);
        noalias(rNodalDevStress) = ZeroVector(sizeStrains);
    }
    // Neighbour ordering and shape-function-derivative storage.
    if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS_ORDER))
    {
        Vector &rNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
        if (rNodalSFDneighboursOrder.size() != neighbourNodes)
            rNodalSFDneighboursOrder.resize(neighbourNodes, false);
        noalias(rNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
    }
    if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
    {
        Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        if (rNodalSFDneighbours.size() != sizeSDFNeigh)
            rNodalSFDneighbours.resize(sizeSDFNeigh, false);
        noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
    }
    // Strain-rate vector and deformation-gradient matrices.
    if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
    {
        Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        if (rSpatialDefRate.size() != sizeStrains)
            rSpatialDefRate.resize(sizeStrains, false);
        noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
    }
    if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
    {
        Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        if (rFgrad.size1() != dimension)
            rFgrad.resize(dimension, dimension, false);
        noalias(rFgrad) = ZeroMatrix(dimension, dimension);
    }
    if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
    {
        Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        if (rFgradVel.size1() != dimension)
            rFgradVel.resize(dimension, dimension, false);
        noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
    }
    // Scalar accumulators reset to zero.
    if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
    {
        itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
    }
    if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
    {
        itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
    }
    if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
    {
        itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
    }
    if (itNode->SolutionStepsDataHas(NODAL_VOLUMETRIC_DEF_RATE))
    {
        itNode->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
    }
    if (itNode->SolutionStepsDataHas(NODAL_EQUIVALENT_STRAIN_RATE))
    {
        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
    }
}
/// Call InitializeNonLinearIteration() on every element of the model part.
///
/// NOTE(review): the previous code partitioned the element array with
/// OpenMPUtils::CreatePartition(omp_get_max_threads(), ...) while the
/// enclosing "#pragma omp parallel" was commented out, so
/// OpenMPUtils::ThisThread() always returned 0 and ONLY the first
/// partition (1/num_threads of the elements) was initialized whenever
/// more than one OpenMP thread was configured. The loop now visits every
/// element; the result is unchanged when running with one thread.
void InitializeNonLinearIterations()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ElementsArrayType &pElements = rModelPart.Elements();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();

    for (typename ElementsArrayType::iterator itElem = pElements.begin(); itElem != pElements.end(); itElem++) //MSI: To be parallelized
    {
        itElem->InitializeNonLinearIteration(rCurrentProcessInfo);
    }
}
void CalcNodalStrainsAndStresses()
{
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
double theta = 0.5;
if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsAndStressesForNode(itNode);
}
else
{ // if nodalVolume==0
InitializeNodalVariablesForRemeshedDomain(itNode);
}
}
// }
}
void CalcNodalStrainsAndStressesForNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT);
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsForNode(ModelPart::NodeIterator itNode)
{
    // Computes, for one node, the spatial deformation-rate tensor (Voigt
    // notation), the equivalent strain rate and the volumetric deformation
    // rate, starting from the nodal deformation gradient F and its rate dF
    // previously stored by ComputeAndStoreNodalDeformationGradient().
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    double detFgrad = 1.0;
    Matrix nodalFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);

    // Inverse of the deformation gradient
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
    }

    // it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);

    // Cache the Voigt strain-rate vector once instead of repeating the
    // solution-step database lookup for every component (the original code
    // performed dozens of identical FastGetSolutionStepValue calls here).
    Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);

    if (dimension == 2)
    {
        // Symmetric part of L in Voigt order [d_xx, d_yy, d_xy]
        rSpatialDefRate[0] = SpatialVelocityGrad(0, 0);
        rSpatialDefRate[1] = SpatialVelocityGrad(1, 1);
        rSpatialDefRate[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));

        // Equivalent strain rate: sqrt(2 d:d) with Voigt shear weight 4
        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * rSpatialDefRate[0] * rSpatialDefRate[0] +
                 2.0 * rSpatialDefRate[1] * rSpatialDefRate[1] +
                 4.0 * rSpatialDefRate[2] * rSpatialDefRate[2]);

        // Volumetric deformation rate = trace of d
        const double DefVol = rSpatialDefRate[0] + rSpatialDefRate[1];
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    else if (dimension == 3)
    {
        // Voigt order [d_xx, d_yy, d_zz, d_xy, d_xz, d_yz]
        rSpatialDefRate[0] = SpatialVelocityGrad(0, 0);
        rSpatialDefRate[1] = SpatialVelocityGrad(1, 1);
        rSpatialDefRate[2] = SpatialVelocityGrad(2, 2);
        rSpatialDefRate[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        rSpatialDefRate[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        rSpatialDefRate[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));

        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * rSpatialDefRate[0] * rSpatialDefRate[0] +
                 2.0 * rSpatialDefRate[1] * rSpatialDefRate[1] +
                 2.0 * rSpatialDefRate[2] * rSpatialDefRate[2] +
                 4.0 * rSpatialDefRate[3] * rSpatialDefRate[3] +
                 4.0 * rSpatialDefRate[4] * rSpatialDefRate[4] +
                 4.0 * rSpatialDefRate[5] * rSpatialDefRate[5]);

        const double DefVol = rSpatialDefRate[0] + rSpatialDefRate[1] + rSpatialDefRate[2];
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
}
void CalcNodalStrains()
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
double theta = 1.0;
if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsForNode(itNode);
}
else
{ // if nodalVolume==0
InitializeNodalVariablesForRemeshedDomain(itNode);
}
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
// Assembles the nodal deformation gradient F and its rate dF from the
// shape-function derivatives of the node itself and of its neighbours
// (NODAL_SFD_NEIGHBOURS stores the derivatives flattened, `dimension`
// entries per node; NODAL_SFD_NEIGHBOURS_ORDER starts with this node's id
// followed by the neighbour ids). Velocities are blended between the
// current step and the previous one with the time-integration factor
// `theta` (theta=1 -> fully current). Results are stored in
// NODAL_DEFORMATION_GRAD and NODAL_DEFORMATION_GRAD_VEL.
void ComputeAndStoreNodalDeformationGradient(ModelPart::NodeIterator itNode, double theta)
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
/* unsigned int idThisNode=nodalSFDneighboursId[0]; */
const unsigned int neighSize = nodalSFDneighboursId.size();
Matrix Fgrad = ZeroMatrix(dimension, dimension);
Matrix FgradVel = ZeroMatrix(dimension, dimension);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
if (dimension == 2)
{
// Contribution of the node itself (first `dimension` derivative entries)
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
// theta-weighted velocity between current (step 0) and previous (step 1)
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
// Neighbour derivative entries start after this node's own `dimension` entries
unsigned int firstRow = 2;
if (neighSize > 0)
{
for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId becuase this has also the considered node ID at the beginning
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
// Consistency check: neighbour list and stored ordering must agree
unsigned int neigh_nodes_id = neighb_nodes[i].Id();
unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
if (neigh_nodes_id != other_neigh_nodes_id)
{
std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
}
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
firstRow += 2;
}
}
}
else
{
// 3D case: same assembly with three derivative components per node
double dNdXi = rNodalSFDneigh[0];
double dNdYi = rNodalSFDneigh[1];
double dNdZi = rNodalSFDneigh[2];
double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * itNode->X();
Fgrad(0, 1) += dNdYi * itNode->X();
Fgrad(0, 2) += dNdZi * itNode->X();
Fgrad(1, 0) += dNdXi * itNode->Y();
Fgrad(1, 1) += dNdYi * itNode->Y();
Fgrad(1, 2) += dNdZi * itNode->Y();
Fgrad(2, 0) += dNdXi * itNode->Z();
Fgrad(2, 1) += dNdYi * itNode->Z();
Fgrad(2, 2) += dNdZi * itNode->Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
unsigned int firstRow = 3;
if (neighSize > 0)
{
// Note: unlike the 2D branch, no id-consistency check is performed here.
for (unsigned int i = 0; i < neighSize - 1; i++)
{
dNdXi = rNodalSFDneigh[firstRow];
dNdYi = rNodalSFDneigh[firstRow + 1];
dNdZi = rNodalSFDneigh[firstRow + 2];
VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();
Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();
Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();
FgradVel(0, 0) += dNdXi * VelocityX;
FgradVel(0, 1) += dNdYi * VelocityX;
FgradVel(0, 2) += dNdZi * VelocityX;
FgradVel(1, 0) += dNdXi * VelocityY;
FgradVel(1, 1) += dNdYi * VelocityY;
FgradVel(1, 2) += dNdZi * VelocityY;
FgradVel(2, 0) += dNdXi * VelocityZ;
FgradVel(2, 1) += dNdYi * VelocityZ;
FgradVel(2, 2) += dNdZi * VelocityZ;
firstRow += 3;
}
}
}
// Persist the assembled gradients for later use by CalcNodalStrainsForNode
itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD) = Fgrad;
itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
KRATOS_CATCH("");
}
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
    // Advances the Lagrangian mesh: compute displacements (resetting the
    // nodal variables in the same pass), move the nodes, then refresh the
    // weighted boundary normals for the moved configuration.
    KRATOS_TRY;
    this->CalculateDisplacementsAndResetNodalVariables();
    BaseType::MoveMesh();
    BoundaryNormalsCalculationUtilities normalsUtility;
    normalsUtility.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
    KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
if ((i)->IsNot(ISOLATED) && (i)->IsNot(RIGID))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
(i)->FastGetSolutionStepValue(NODAL_VOLUME) = 0.0;
(i)->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0.0;
(i)->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
(i)->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0.0;
(i)->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3> &CurrentVelocity,
                                array_1d<double, 3> &PreviousAcceleration,
                                const array_1d<double, 3> &PreviousVelocity,
                                Vector &BDFcoeffs)
{
    // BDF acceleration update:
    //   a_{n+1} = -c1 * (v_{n+1} - v_n) - a_n
    // where c1 = BDFcoeffs[1] (equal to -2/dt for a constant time step,
    // BDFcoeffs[0] = 3/(2 dt), BDFcoeffs[2] = 1/(2 dt)).
    const double velocityCoefficient = -BDFcoeffs[1];
    noalias(CurrentAcceleration) = velocityCoefficient * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
}
void CalculateDisplacements()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
}
void CalculateDisplacementsAndResetNodalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
if (dimension == 3)
{
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
///// reset Nodal variables //////
Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
// unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
// unsigned int sizeSDFNeigh=neighbourNodes*dimension;
i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
// }
}
void UpdatePressureAccelerations()
{
// Fixed sequence: nodal accelerations first, then dP/dt (needs the
// updated pressures), then d2P/dt2 (needs the updated dP/dt).
this->CalculateAccelerations();
this->CalculatePressureVelocity();
this->CalculatePressureAcceleration();
}
void Clear() override
{
// Release the internal data (systems, DOF sets) of both sub-strategies.
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
void SetEchoLevel(int Level) override
{
    // Propagate the verbosity, one level quieter for the sub-strategies
    // (never below zero).
    BaseType::SetEchoLevel(Level);
    int StrategyLevel = 0;
    if (Level > 0)
    {
        StrategyLevel = Level - 1;
    }
    mpMomentumStrategy->SetEchoLevel(StrategyLevel);
    mpPressureStrategy->SetEchoLevel(StrategyLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
    // The stringstream round-trip of the original added nothing.
    return "NodalTwoStepVPStrategy";
}
/// Print information about this object.
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
// Writes only the strategy name, matching Info().
rOStream << "NodalTwoStepVPStrategy";
}
/// Print object's data.
/// Print object's data.
void PrintData(std::ostream &rOStream) const override
{
// Intentionally empty: this strategy exposes no printable data.
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
/// Calculate the coefficients for time iteration.
/**
 * Fills BDF_COEFFICIENTS in the process info according to mTimeOrder.
 * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
 */
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;
    if (mTimeOrder == 2)
    {
        // Variable-step BDF2: Rho is the ratio of the previous to the
        // current time step (Rho = 1 for a constant step size).
        const double Dt = rCurrentProcessInfo[DELTA_TIME];
        const double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
        const double Rho = OldDt / Dt;
        const double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);
        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);       // step n+1 (3/(2 Dt) for constant Dt)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); // step n   (-4/(2 Dt) for constant Dt)
        BDFcoeffs[2] = TimeCoeff;                                  // step n-1 (1/(2 Dt) for constant Dt)
    }
    else if (mTimeOrder == 1)
    {
        // BDF1 (backward Euler).
        const double Dt = rCurrentProcessInfo[DELTA_TIME];
        const double TimeCoeff = 1.0 / Dt;
        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);
        BDFcoeffs[0] = TimeCoeff;  // step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; // step n   (-1/Dt)
    }
    KRATOS_CATCH("");
}
// Performs one nonlinear iteration of the momentum (velocity) problem.
// Returns true when the relative velocity-increment norm is below
// mVelocityTolerance. `fixedTimeStep` is set when the time step has to be
// kept fixed because convergence is failing (decided on the last
// iteration, or after iteration `iterationForCheck`).
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep = false;
// build momentum system and solve for fractional step velocity increment
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
if (it == 0)
{
// First iteration of this step: prepare the momentum strategy.
mpMomentumStrategy->InitializeSolutionStep();
/* this->SetNeighboursVelocityId(); */
}
// Solve() returns the norm of the velocity increment.
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
double DvErrorNorm = 0;
ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
// Only start checking for a stalled momentum solve after this iteration.
unsigned int iterationForCheck = 3;
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
// Last allowed iteration: decide whether the time step must be fixed.
std::cout << " iteration(" << it << ") Final Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
}
else if (it > iterationForCheck)
{
fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
}
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
// Performs one iteration of the continuity (pressure) problem.
// Returns true when the relative pressure-increment norm satisfies
// mPressureTolerance; on the last allowed iteration the result comes from
// FixTimeStepContinuity() instead.
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
double NormDp = 0;
// 2. Pressure solution
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
if (it == 0)
{
// First iteration of this step: prepare the pressure strategy.
mpPressureStrategy->InitializeSolutionStep();
}
// Solve() returns the norm of the pressure increment.
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
double DpErrorNorm = 0;
ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);
// Check convergence
if (it == maxIt - 1)
{
// Last allowed iteration: FixTimeStepContinuity decides the outcome.
std::cout << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
}
else
{
std::cout << " iteration(" << it << ") Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
}
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
// Computes the relative velocity-increment norm errorNormDv = ||dv||/||v||
// (with ||v|| clamped to 1 when the velocity field is identically zero)
// and returns true when it is below mVelocityTolerance.
// NormDv is the increment norm reported by the momentum solve;
// errorNormDv is an output parameter.
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
    errorNormDv = 0;
#pragma omp parallel reduction(+ : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            // Accumulate the squared velocity. (The original code also kept
            // a per-node accumulator that was never read; removed.)
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // MPI reduction across ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    // Guard against division by zero for a fully quiescent field.
    if (NormV == 0.0)
        NormV = 1.00;
    errorNormDv = NormDv / NormV;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
    }
    return errorNormDv < mVelocityTolerance;
}
void ComputeErrorL2NormCaseImposedG()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
double sumErrorL2Velocity = 0;
double sumErrorL2VelocityX = 0;
double sumErrorL2VelocityY = 0;
double sumErrorL2Pressure = 0;
double sumErrorL2TauXX = 0;
double sumErrorL2TauYY = 0;
double sumErrorL2TauXY = 0;
#pragma omp parallel
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double posX = itNode->X();
const double posY = itNode->Y();
const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
const double pressure = itNode->FastGetSolutionStepValue(PRESSURE);
const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];
double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
double expectedPressure = -posX * (1.0 - posX);
double expectedTauXX = 2.0 * (-4.0 * (1 - posX) * posX * (-1.0 + 2.0 * posX) * posY * (1.0 - 3.0 * posY + 2.0 * pow(posY, 2)));
double expectedTauYY = 2.0 * (4.0 * posX * (1.0 - 3.0 * posX + 2.0 * pow(posX, 2)) * (1 - posY) * posY * (-1.0 + 2.0 * posY));
double expectedTauXY = (2.0 * (1.0 - 6.0 * posY + 6.0 * pow(posY, 2)) * (1 - posX) * (1 - posX) * pow(posX, 2) - 2.0 * (1.0 - 6.0 * posX + 6.0 * pow(posX, 2)) * (1 - posY) * (1 - posY) * pow(posY, 2));
double nodalErrorVelocityX = velX - expectedVelocityX;
double nodalErrorVelocityY = velY - expectedVelocityY;
double nodalErrorPressure = pressure - expectedPressure;
double nodalErrorTauXX = tauXX - expectedTauXX;
double nodalErrorTauYY = tauYY - expectedTauYY;
double nodalErrorTauXY = tauXY - expectedTauXY;
sumErrorL2Velocity += (pow(nodalErrorVelocityX, 2) + pow(nodalErrorVelocityY, 2)) * nodalArea;
sumErrorL2VelocityX += pow(nodalErrorVelocityX, 2) * nodalArea;
sumErrorL2VelocityY += pow(nodalErrorVelocityY, 2) * nodalArea;
sumErrorL2Pressure += pow(nodalErrorPressure, 2) * nodalArea;
sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * nodalArea;
sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * nodalArea;
sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * nodalArea;
// itNode->FastGetSolutionStepValue(NODAL_ERROR_XX)=nodalErrorTauXX;
}
}
double errorL2Velocity = sqrt(sumErrorL2Velocity);
double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
double errorL2Pressure = sqrt(sumErrorL2Pressure);
double errorL2TauXX = sqrt(sumErrorL2TauXX);
double errorL2TauYY = sqrt(sumErrorL2TauYY);
double errorL2TauXY = sqrt(sumErrorL2TauXY);
std::ofstream myfileVelocity;
myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
myfileVelocity.close();
std::ofstream myfileVelocityX;
myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
myfileVelocityX.close();
std::ofstream myfileVelocityY;
myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
myfileVelocityY.close();
std::ofstream myfilePressure;
myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
myfilePressure.close();
std::ofstream myfileTauXX;
myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
myfileTauXX.close();
std::ofstream myfileTauYY;
myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
myfileTauYY.close();
std::ofstream myfileTauXY;
myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
myfileTauXY.close();
}
void ComputeErrorL2NormCasePoiseuille()
{
    // Compute the L2-norm errors of the tangential velocity and the tangential
    // deviatoric stress against the analytic circular-Couette solution, and
    // append "<time>\t<velErr>\t<tauErr>" to errorL2Poiseuille.txt.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;
    // Parameters of the analytic solution.
    // NOTE(review): hard-coded for one benchmark set-up — confirm they match
    // the mesh/material actually used before trusting the reported error.
    const double r_in = 0.2;
    const double R_out = 0.5;
    const double kappa = r_in / R_out;
    const double omega = 0.5;
    const double viscosity = 100.0;
    // BUG FIX: the two accumulators were previously updated from a plain
    // "#pragma omp parallel" region with no synchronization — a data race
    // that could corrupt the sums. A reduction clause makes them race-free.
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double posX = itNode->X();
            const double posY = itNode->Y();
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
            const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
            const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
            const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
            const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];
            // Analytic tangential velocity/stress of the rotating-cylinder flow.
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(velX, 2) + pow(velY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            // Rotate the Cartesian deviatoric stress into the tangential component.
            double computedTauTheta = +(tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
            // Store the computed tangential velocity for post-processing.
            itNode->FastGetSolutionStepValue(NODAL_ERROR_XX) = computedVelocityTheta;
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * nodalArea;
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * nodalArea;
        }
    }
    double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}
bool CheckPressureConvergence(const double NormDp, double &errorNormDp)
{
    // Relative pressure-increment convergence check:
    //   errorNormDp = ||Dp|| / ||p||  (pressure norm summed across MPI ranks)
    // Returns true when the relative error is below mPressureTolerance.
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormP = 0.00;
    errorNormDp = 0;
    ModelPart::NodeIterator NodeBegin;
    ModelPart::NodeIterator NodeEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
    for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
    {
        const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
        NormP += Pr * Pr;
    }
    // Accumulate the squared norm across ranks before taking the root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
    NormP = sqrt(NormP);
    // Guard against division by zero for an all-zero pressure field.
    if (NormP == 0.0)
        NormP = 1.00;
    errorNormDp = NormDp / NormP;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
        std::cout << " The norm of pressure is: " << NormP << std::endl;
        std::cout << " Pressure error: " << errorNormDp << std::endl;
    }
    // IDIOM FIX: the original "if (...) return true; else return false;"
    // collapses to a single boolean expression.
    return errorNormDp < mPressureTolerance;
}
bool FixTimeStepMomentum(const double DvErrorNorm)
{
    // Detect divergence of the momentum iteration. On bad convergence the
    // BAD_VELOCITY_CONVERGENCE flag is raised; on severe divergence the
    // nodal velocity/pressure/acceleration are rolled back to the previous
    // step and true is returned (caller shortens the next time step).
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool fixedTimeStep = false;
    // Be permissive during the first few time steps.
    if (currentTime < 3 * timeInterval)
    {
        minTolerance = 10;
    }
    // CLEANUP: the original condition also tested
    // "(DvErrorNorm < 0 && DvErrorNorm > 0)" (always false) and
    // "DvErrorNorm != DvErrorNorm" (identical to std::isnan); both removed.
    const bool notFinite = std::isnan(DvErrorNorm) || std::isinf(DvErrorNorm);
    if ((DvErrorNorm > minTolerance || notFinite) && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
            fixedTimeStep = true;
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                // Roll the solution back to the previous step's values.
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
bool CheckMomentumConvergence(const double DvErrorNorm)
{
    // In-loop divergence check for the momentum iteration: if the velocity
    // error norm exceeds ~1 (or is NaN/Inf), flag bad convergence, restore
    // the previous step's nodal fields and return true.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double minTolerance = 0.99999;
    bool fixedTimeStep = false;
    // CLEANUP: removed the always-false "(DvErrorNorm < 0 && DvErrorNorm > 0)"
    // and the "x != x" NaN test that duplicated std::isnan.
    const bool notFinite = std::isnan(DvErrorNorm) || std::isinf(DvErrorNorm);
    if ((DvErrorNorm > minTolerance || notFinite) && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
        // Each thread writes only its own node partition — no race here.
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
bool FixTimeStepContinuity(const double DvErrorNorm)
{
    // Divergence check for the continuity (pressure) step: raises
    // BAD_PRESSURE_CONVERGENCE and returns true when the error norm exceeds
    // the tolerance or is NaN/Inf. No field rollback is done here.
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool fixedTimeStep = false;
    // Be permissive during the first few time steps.
    if (currentTime < 3 * timeInterval)
    {
        minTolerance = 10;
    }
    // CLEANUP: removed the always-false "(DvErrorNorm < 0 && DvErrorNorm > 0)"
    // and the "x != x" NaN test that duplicated std::isnan.
    const bool notFinite = std::isnan(DvErrorNorm) || std::isinf(DvErrorNorm);
    if ((DvErrorNorm > minTolerance || notFinite) && DvErrorNorm != 0 && DvErrorNorm != 1)
    {
        fixedTimeStep = true;
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
// private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// Relative convergence tolerance for the momentum (velocity) iteration,
// read from the solver settings in InitializeStrategy().
double mVelocityTolerance;
// Relative convergence tolerance for the pressure step; compared against
// the pressure error in CheckPressureConvergence().
double mPressureTolerance;
// Maximum number of pressure iterations, from SolverSettings::FindMaxIter.
unsigned int mMaxPressureIter;
// Spatial dimension, from SolverSettings::GetDomainSize().
unsigned int mDomainSize;
// Time-integration order, from SolverSettings::GetTimeOrder().
unsigned int mTimeOrder;
// When true, the DOF set is rebuilt (from SolverSettings::GetReformDofSet()).
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Pull configuration (time order, domain size, DOF-set policy, echo level)
// out of rSolverConfig and locate the momentum and pressure sub-strategies.
// Throws if either strategy is missing from the settings.
void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
KRATOS_TRY;
mTimeOrder = rSolverConfig.GetTimeOrder();
// Check that input parameters are reasonable and sufficient.
this->Check();
//ModelPart& rModelPart = this->GetModelPart();
mDomainSize = rSolverConfig.GetDomainSize();
mReformDofSet = rSolverConfig.GetReformDofSet();
BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
// Initialize strategies for each step
bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
if (HaveVelStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
/* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
}
bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
if (HavePressStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
}
// Check input parameters
// NOTE(review): this->Check() was already called above; the second call
// presumably re-validates state after the strategies are attached — confirm
// whether both calls are needed.
this->Check();
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// Private and empty to forbid copying from outside the class.
// NOTE(review): the body is empty although the declared return type is a
// reference — calling this would be undefined behavior (missing return).
// Consider "= delete" if the codebase's C++ standard allows it.
NodalTwoStepVPStrategy &operator=(NodalTwoStepVPStrategy const &rOther) {}
/// Copy constructor.
// Private and empty: strategies are not copyable.
NodalTwoStepVPStrategy(NodalTwoStepVPStrategy const &rOther) {}
///@}
}; /// Class NodalTwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
|
lu_decompose.c | /**
* \file
* \brief [LU decomposition](https://en.wikipedia.org/wiki/LU_decompositon) of a
* square matrix
* \author [Krishna Vedala](https://github.com/kvedala)
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Perform LU decomposition on matrix
* \param[in] A matrix to decompose
* \param[out] L output L matrix
* \param[out] U output U matrix
* \param[in] mat_size input square matrix size
*/
int lu_decomposition(double **A, double **L, double **U, int mat_size)
{
    /* Doolittle LU decomposition without pivoting: A = L*U with unit diagonal
     * in L. NOTE: no pivoting means U[row][row] may be zero for singular or
     * ill-ordered matrices, producing Inf/NaN in L.
     *
     * BUG FIX: the original used orphaned "#pragma omp for" directives with
     * the loop indices (col, j) declared at function scope. Outside a
     * parallel region the pragmas did nothing; inside one, the shared
     * indices would have raced. Each column loop is independent for a fixed
     * row, so a self-contained "parallel for" with loop-local indices is
     * both correct and actually parallel. */
    for (int row = 0; row < mat_size; row++)
    {
        /* Upper triangular row: U[row][col] = A[row][col] - sum_j L[row][j]*U[j][col] */
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int col = row; col < mat_size; col++)
        {
            double lu_sum = 0.;
            for (int j = 0; j < row; j++) lu_sum += L[row][j] * U[j][col];
            U[row][col] = A[row][col] - lu_sum;
        }
        /* Lower triangular column: L[col][row], with L[row][row] = 1 */
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int col = row; col < mat_size; col++)
        {
            if (row == col)
            {
                L[row][col] = 1.;
                continue;
            }
            double lu_sum = 0.;
            for (int j = 0; j < row; j++) lu_sum += L[col][j] * U[j][row];
            L[col][row] = (A[col][row] - lu_sum) / U[row][row];
        }
    }
    return 0;
}
/** Function to display square matrix */
/* Print an N x N matrix to stdout, one row per line. */
void display(double **A, int N)
{
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++) printf("% 3.3g \t", A[row][col]);
        putchar('\n');
    }
}
/** Main function */
/** Main function: build a random square matrix (size from argv[1], default 3),
 * LU-decompose it and print A, L and U. */
int main(int argc, char **argv)
{
    int mat_size = 3; // default matrix size
    const int range = 10;
    const int range2 = range >> 1;
    if (argc == 2)
    {
        mat_size = atoi(argv[1]);
        /* ROBUSTNESS FIX: atoi returns 0 on garbage and may return negative
         * numbers; either would break the allocations below. */
        if (mat_size < 1)
        {
            fprintf(stderr, "Invalid matrix size '%s'; using default 3.\n", argv[1]);
            mat_size = 3;
        }
    }
    srand(time(NULL)); // random number initializer
    /* Create a square matrix with random values.
     * IDIOM FIX: no malloc casts; "sizeof *ptr" instead of "sizeof(type)".
     * ROBUSTNESS FIX: every allocation result is now checked. */
    double **A = malloc(mat_size * sizeof *A);
    double **L = malloc(mat_size * sizeof *L); // output
    double **U = malloc(mat_size * sizeof *U); // output
    if (A == NULL || L == NULL || U == NULL)
    {
        fprintf(stderr, "Memory allocation failed\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < mat_size; i++)
    {
        // calloc so that all values are '0' by default
        A[i] = calloc(mat_size, sizeof(double));
        L[i] = calloc(mat_size, sizeof(double));
        U[i] = calloc(mat_size, sizeof(double));
        if (A[i] == NULL || L[i] == NULL || U[i] == NULL)
        {
            fprintf(stderr, "Memory allocation failed\n");
            return EXIT_FAILURE;
        }
        for (int j = 0; j < mat_size; j++)
            /* create random values in the limits [-range2, range-1] */
            A[i][j] = (double)(rand() % range - range2);
    }
    lu_decomposition(A, L, U, mat_size);
    printf("A = \n");
    display(A, mat_size);
    printf("\nL = \n");
    display(L, mat_size);
    printf("\nU = \n");
    display(U, mat_size);
    /* Free dynamically allocated memory */
    for (int i = 0; i < mat_size; i++)
    {
        free(A[i]);
        free(L[i]);
        free(U[i]);
    }
    free(A);
    free(L);
    free(U);
    return 0;
}
nvector_openmp.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMP(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
/* Constant identifier for this implementation; the argument is unused. */
return SUNDIALS_NVEC_OPENMP;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads)
{
/* Allocate an OpenMP vector shell: the generic N_Vector plus its content
 * struct, with the full operations table wired up but NO data array
 * (content->data stays NULL and own_data is SUNFALSE). Callers attach or
 * allocate data afterwards (see N_VNew_OpenMP / N_VMake_OpenMP).
 * Returns NULL on any allocation failure. */
N_Vector v;
N_VectorContent_OpenMP content;
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_OpenMP;
v->ops->nvclone = N_VClone_OpenMP;
v->ops->nvcloneempty = N_VCloneEmpty_OpenMP;
v->ops->nvdestroy = N_VDestroy_OpenMP;
v->ops->nvspace = N_VSpace_OpenMP;
v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
v->ops->nvgetlength = N_VGetLength_OpenMP;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_OpenMP;
v->ops->nvconst = N_VConst_OpenMP;
v->ops->nvprod = N_VProd_OpenMP;
v->ops->nvdiv = N_VDiv_OpenMP;
v->ops->nvscale = N_VScale_OpenMP;
v->ops->nvabs = N_VAbs_OpenMP;
v->ops->nvinv = N_VInv_OpenMP;
v->ops->nvaddconst = N_VAddConst_OpenMP;
v->ops->nvdotprod = N_VDotProd_OpenMP;
v->ops->nvmaxnorm = N_VMaxNorm_OpenMP;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMP;
v->ops->nvmin = N_VMin_OpenMP;
v->ops->nvwl2norm = N_VWL2Norm_OpenMP;
v->ops->nvl1norm = N_VL1Norm_OpenMP;
v->ops->nvcompare = N_VCompare_OpenMP;
v->ops->nvinvtest = N_VInvTest_OpenMP;
v->ops->nvconstrmask = N_VConstrMask_OpenMP;
v->ops->nvminquotient = N_VMinQuotient_OpenMP;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction kernels: this is a shared-memory vector, so the "local"
 * variants are just the global reductions. */
v->ops->nvdotprodlocal = N_VDotProd_OpenMP;
v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMP;
v->ops->nvminlocal = N_VMin_OpenMP;
v->ops->nvl1normlocal = N_VL1Norm_OpenMP;
v->ops->nvinvtestlocal = N_VInvTest_OpenMP;
v->ops->nvconstrmasklocal = N_VConstrMask_OpenMP;
v->ops->nvminquotientlocal = N_VMinQuotient_OpenMP;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMP;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP;
/* XBraid interface operations */
v->ops->nvbufsize = N_VBufSize_OpenMP;
v->ops->nvbufpack = N_VBufPack_OpenMP;
v->ops->nvbufunpack = N_VBufUnpack_OpenMP;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = length;
content->num_threads = num_threads;
content->own_data = SUNFALSE;
content->data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMP(sunindextype length, int num_threads)
{
  /* Create a fully-allocated OpenMP vector: an empty shell plus an owned
     data array of 'length' realtype entries. Returns NULL on failure. */
  N_Vector vec = N_VNewEmpty_OpenMP(length, num_threads);
  if (vec == NULL) return(NULL);
  if (length > 0) {
    realtype *buffer = (realtype *) malloc(length * sizeof(realtype));
    if (buffer == NULL) { N_VDestroy_OpenMP(vec); return(NULL); }
    /* The vector owns this buffer and frees it in N_VDestroy_OpenMP. */
    NV_OWN_DATA_OMP(vec) = SUNTRUE;
    NV_DATA_OMP(vec)     = buffer;
  }
  return(vec);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads)
{
  /* Wrap a caller-supplied data array in an OpenMP vector. The vector does
     NOT take ownership (own_data stays SUNFALSE), so the caller must keep
     v_data alive and free it. Returns NULL on failure. */
  N_Vector vec = N_VNewEmpty_OpenMP(length, num_threads);
  if (vec == NULL) return(NULL);
  if (length > 0) {
    NV_OWN_DATA_OMP(vec) = SUNFALSE;
    NV_DATA_OMP(vec)     = v_data;
  }
  return(vec);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
  /* Create an array of 'count' full clones of w (each with its own data).
   * Returns NULL if count <= 0 or on any allocation failure; on failure all
   * partially-created vectors and the array itself are released. */
  N_Vector* vs;
  int j;
  if (count <= 0) return(NULL);
  vs = NULL;
  vs = (N_Vector*) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);
  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VClone_OpenMP(w);
    if (vs[j] == NULL) {
      /* BUG FIX: when clone j fails, indices 0..j-1 (j vectors) were
       * successfully created, but the original passed count j-1 to the
       * destroy routine and leaked vs[j-1]. */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }
  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
  /* Create an array of 'count' empty clones of w (structure only, data
   * pointers NULL). Returns NULL if count <= 0 or on allocation failure; on
   * failure all partially-created vectors and the array are released. */
  N_Vector* vs;
  int j;
  if (count <= 0) return(NULL);
  vs = NULL;
  vs = (N_Vector*) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);
  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VCloneEmpty_OpenMP(w);
    if (vs[j] == NULL) {
      /* BUG FIX: j vectors (indices 0..j-1) exist at this point; the
       * original passed j-1 and leaked vs[j-1]. */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }
  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count)
{
  /* Destroy the first 'count' vectors, then free the array container. */
  for (int k = 0; k < count; k++) {
    N_VDestroy_OpenMP(vs[k]);
  }
  free(vs);
  vs = NULL;
  return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMP(N_Vector v)
{
/* Element count, read from the vector's content via the NV_LENGTH_OMP macro. */
return NV_LENGTH_OMP(v);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMP(N_Vector x)
{
/* Convenience wrapper: print to stdout via N_VPrintFile_OpenMP. */
N_VPrintFile_OpenMP(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile)
{
/* Print each component on its own line, followed by one blank line. The
 * format is chosen per realtype precision; note the double-precision and
 * default (single) branches intentionally share the same "%11.8g" format. */
sunindextype i, N;
realtype *xd;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
fprintf(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
fprintf(outfile, "%11.8g\n", xd[i]);
#else
fprintf(outfile, "%11.8g\n", xd[i]);
#endif
}
fprintf(outfile, "\n");
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
/* Create a new vector with w's operations table, length and thread count,
 * but no data array (data NULL, own_data SUNFALSE).
 * Returns NULL if w is NULL or any allocation fails. */
N_Vector v;
N_VectorContent_OpenMP content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content: copy the source's length and thread count. */
content->length = NV_LENGTH_OMP(w);
content->num_threads = NV_NUM_THREADS_OMP(w);
content->own_data = SUNFALSE;
content->data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMP(N_Vector w)
{
  /* Clone w's structure and allocate a fresh, owned data array of the same
     length. The array contents are NOT copied. Returns NULL on failure. */
  N_Vector vec = N_VCloneEmpty_OpenMP(w);
  if (vec == NULL) return(NULL);
  sunindextype len = NV_LENGTH_OMP(w);
  if (len > 0) {
    realtype *buffer = (realtype *) malloc(len * sizeof(realtype));
    if (buffer == NULL) { N_VDestroy_OpenMP(vec); return(NULL); }
    NV_OWN_DATA_OMP(vec) = SUNTRUE;
    NV_DATA_OMP(vec)     = buffer;
  }
  return(vec);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMP(N_Vector v)
{
/* NULL-safe destructor: frees the data array only if the vector owns it,
 * then the content struct, the ops table, and the vector itself. */
if (v == NULL) return;
/* free content */
if (v->content != NULL) {
/* free data array if it's owned by the vector */
if (NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) {
free(NV_DATA_OMP(v));
NV_DATA_OMP(v) = NULL;
}
free(v->content);
v->content = NULL;
}
/* free ops and vector */
if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
free(v); v = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  /* Storage requirement: one realtype word per element and one integer word. */
  *lrw = NV_LENGTH_OMP(v);
  *liw = 1;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
/* Raw pointer to the data array (NULL when no data has been attached). */
return((realtype *) NV_DATA_OMP(v));
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
/* Attach an external data array; a zero-length vector is left unchanged. */
if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data;
return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
/* z = a*x + b*y. The long chain of special cases below dispatches common
 * coefficient combinations (axpy, sum, diff, scaled variants) to cheaper
 * private kernels; the ORDER of these tests matters (aliasing cases first),
 * so the structure is preserved as-is. The general case at the bottom does
 * the full two-coefficient combination in one OpenMP loop. */
sunindextype i, N;
realtype c, *xd, *yd, *zd;
N_Vector v1, v2;
booleantype test;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMP(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMP(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMP(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMP(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMP(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMP(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMP(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMP(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+(b*yd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMP(realtype c, N_Vector z)
{
  /* Fill every component of z with the constant c. */
  sunindextype k, len;
  realtype *zarr;
  k = 0; /* initialize to suppress clang warning */
  len  = NV_LENGTH_OMP(z);
  zarr = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(k) shared(len,c,zarr) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(z))
  for (k = 0; k < len; k++) {
    zarr[k] = c;
  }
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* Componentwise product: z[i] = x[i]*y[i]. */
  sunindextype k, len;
  realtype *xarr, *yarr, *zarr;
  k = 0; /* initialize to suppress clang warning */
  len  = NV_LENGTH_OMP(x);
  xarr = NV_DATA_OMP(x);
  yarr = NV_DATA_OMP(y);
  zarr = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(k) shared(len,xarr,yarr,zarr) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zarr[k] = xarr[k] * yarr[k];
  }
  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* Componentwise quotient: z[i] = x[i]/y[i]. No zero check is performed. */
  sunindextype k, len;
  realtype *xarr, *yarr, *zarr;
  k = 0; /* initialize to suppress clang warning */
  len  = NV_LENGTH_OMP(x);
  xarr = NV_DATA_OMP(x);
  yarr = NV_DATA_OMP(y);
  zarr = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(k) shared(len,xarr,yarr,zarr) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (k = 0; k < len; k++) {
    zarr[k] = xarr[k] / yarr[k];
  }
  return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
/* z = c*x. Dispatches the aliased case (z == x) and the trivial scalars
 * (c == 1, c == -1) to cheaper private kernels before running the general
 * OpenMP loop. */
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
if (z == x) { /* BLAS usage: scale x <- cx */
VScaleBy_OpenMP(c, x);
return;
}
if (c == ONE) {
VCopy_OpenMP(x, z);
} else if (c == -ONE) {
VNeg_OpenMP(x, z);
} else {
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*xd[i];
}
return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
  /* Componentwise absolute value: z[i] = |x[i]|. */
  sunindextype i, N;
  realtype *xd, *zd;

  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  /* FIX: added default(none) private(i) shared(...) for consistency with every
     other kernel in this file; the original pragma relied on implicit sharing */
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = SUNRabs(xd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
  /* Componentwise reciprocal: z[i] = 1/x[i] (caller guarantees x[i] != 0). */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = ONE / xd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
  /* Shift every component by the scalar b: z[i] = x[i] + b. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = xd[i] + b;
  }
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
  /* Dot product sum_i x[i]*y[i], computed with an OpenMP sum reduction. */
  sunindextype i;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype    *yd  = NV_DATA_OMP(y);
  realtype     sum = ZERO;

#pragma omp parallel for default(none) private(i) shared(N,xd,yd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += xd[i] * yd[i];
  }

  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
  /* Max norm max_i |x[i]|: each thread accumulates a private maximum,
     then the per-thread results are merged inside a critical section. */
  sunindextype i;
  realtype tmax;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype     max = ZERO;

#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmax = ZERO;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      realtype a = SUNRabs(xd[i]);
      if (a > tmax) tmax = a;
    }
#pragma omp critical
    {
      if (tmax > max) max = tmax;
    }
  }

  return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
  /* WRMS norm: sqrt( (1/N) * sum_i (x[i]*w[i])^2 ). */
  realtype sqrsum = N_VWSqrSumLocal_OpenMP(x, w);
  return(SUNRsqrt(sqrsum / (NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  /* Masked WRMS norm: like N_VWrmsNorm but only over entries with id[i] > 0. */
  realtype sqrsum = N_VWSqrSumMaskLocal_OpenMP(x, w, id);
  return(SUNRsqrt(sqrsum / (NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
realtype N_VMin_OpenMP(N_Vector x)
{
  /* Smallest component of x. Each thread tracks a private minimum seeded
     with xd[0]; results are merged under a critical section guarded by a
     cheap pre-test. */
  sunindextype i;
  realtype tmin;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype     min = xd[0];

#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmin = xd[0];
#pragma omp for schedule(static)
    for (i = 1; i < N; i++) {
      if (xd[i] < tmin) tmin = xd[i];
    }
    if (tmin < min) {   /* re-checked under the lock before updating */
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
  /* Weighted L2 norm: sqrt( sum_i (x[i]*w[i])^2 ). */
  sunindextype i;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype    *wd  = NV_DATA_OMP(w);
  realtype     sum = ZERO;

#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd[i] * wd[i]);
  }

  return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMP(N_Vector x)
{
  /* L1 norm: sum_i |x[i]|, via OpenMP sum reduction. */
  sunindextype i;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype     sum = ZERO;

#pragma omp parallel for default(none) private(i) shared(N,xd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNRabs(xd[i]);
  }

  return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  /* Componentwise threshold test: z[i] = 1 if |x[i]| >= c, else 0. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO;
  }
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
  /* Componentwise inverse z[i] = 1/x[i], skipping entries where x[i] == 0.
     Returns SUNTRUE if every entry was invertible, SUNFALSE otherwise.
     Entries of z corresponding to x[i] == 0 are left unmodified. */
  sunindextype i, N;
  realtype *xd, *zd, val;

  i = 0; /* initialize to suppress clang warning */
  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  val = ZERO;

  /* FIX: the original listed val as shared and assigned it from multiple
     threads without synchronization (a data race); reduction(max:val)
     produces the same final flag without the race */
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) \
  reduction(max:val) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (xd[i] == ZERO)
      val = ONE;
    else
      zd[i] = ONE/xd[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
  /* Constraint check: for each i, c[i] encodes the constraint on x[i]
     (|c[i]| > 1.5 means strict sign constraint, |c[i]| > 0.5 means weak).
     Sets m[i] = 1 where a constraint is violated, 0 elsewhere.
     Returns SUNFALSE if any constraint was violated, SUNTRUE otherwise. */
  sunindextype i, N;
  realtype temp;
  realtype *cd, *xd, *md;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  cd = xd = md = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  cd = NV_DATA_OMP(c);
  md = NV_DATA_OMP(m);

  temp = ZERO;

  /* FIX: the original shared temp and wrote it from multiple threads
     (the code even carried a "Here is a race" comment); reduction(max:temp)
     yields the same violation flag race-free */
#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md) \
  reduction(max:temp) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    md[i] = ZERO;

    /* Continue if no constraints were set for the variable */
    if (cd[i] == ZERO)
      continue;

    /* Check if a set constraint has been violated */
    test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) ||
           (SUNRabs(cd[i]) > HALF   && xd[i]*cd[i] <  ZERO);
    if (test) {
      temp = md[i] = ONE;
    }
  }

  /* Return false if any constraint was violated */
  return (temp == ONE) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
  /* Minimum of num[i]/denom[i] over all i with denom[i] != 0; returns
     BIG_REAL when every denominator is zero. Threads keep private minima
     which are merged under a critical section. */
  sunindextype i;
  realtype tmin, val;
  sunindextype N   = NV_LENGTH_OMP(num);
  realtype    *nd  = NV_DATA_OMP(num);
  realtype    *dd  = NV_DATA_OMP(denom);
  realtype     min = BIG_REAL;

#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
  num_threads(NV_NUM_THREADS_OMP(num))
  {
    tmin = BIG_REAL;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (dd[i] != ZERO) {
        val = nd[i] / dd[i];
        if (val < tmin) tmin = val;
      }
    }
    if (tmin < min) {   /* re-checked under the lock before updating */
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w)
{
  /* Weighted squared sum: sum_i (x[i]*w[i])^2, via OpenMP sum reduction. */
  sunindextype i;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype    *wd  = NV_DATA_OMP(w);
  realtype     sum = ZERO;

#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd[i] * wd[i]);
  }

  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  /* Masked weighted squared sum: sum of (x[i]*w[i])^2 over entries with
     id[i] > 0, via OpenMP sum reduction. */
  sunindextype i;
  sunindextype N   = NV_LENGTH_OMP(x);
  realtype    *xd  = NV_DATA_OMP(x);
  realtype    *wd  = NV_DATA_OMP(w);
  realtype    *idd = NV_DATA_OMP(id);
  realtype     sum = ZERO;

#pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (idd[i] > ZERO) {
      sum += SUNSQR(xd[i] * wd[i]);
    }
  }

  return(sum);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  /* Linear combination z = sum_{i=0}^{nvec-1} c[i] * X[i].
     Returns 0 on success, -1 if nvec < 1.
     Three aliasing cases are handled so the accumulation can be done in
     place when z is X[0]. */
  int i;
  sunindextype j, N;
  realtype* zd=NULL;
  realtype* xd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMP(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N  = NV_LENGTH_OMP(z);
  zd = NV_DATA_OMP(z);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
    /* the outer i-loop runs (redundantly) on every thread; only the inner
       omp for worksharing loop splits the j-range across threads */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
    {
      for (i=1; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          zd[j] += c[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
    {
      /* first scale z in place by c[0] ... */
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] *= c[0];
      }
      /* ... then accumulate the remaining terms */
      for (i=1; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          zd[j] += c[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
  num_threads(NV_NUM_THREADS_OMP(z))
  {
    /* initialize z with the first scaled term ... */
    xd = NV_DATA_OMP(X[0]);
#pragma omp for schedule(static)
    for (j=0; j<N; j++) {
      zd[j] = c[0] * xd[j];
    }
    /* ... then accumulate the rest */
    for (i=1; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] += c[i] * xd[j];
      }
    }
  }
  return(0);
}
int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  /* Z[i] = Y[i] + a[i] * x for i = 0,...,nvec-1 (in place when Y == Z).
     Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and data array */
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
    /* in-place update: all threads walk the i-loop; each inner omp for
       splits the j-range */
#pragma omp parallel default(none) private(i,j,yd) shared(nvec,Y,N,a,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
    {
      for (i=0; i<nvec; i++) {
        yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          yd[j] += a[i] * xd[j];
        }
      }
    }
    return(0);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp parallel default(none) private(i,j,yd,zd) shared(nvec,Y,Z,N,a,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    for (i=0; i<nvec; i++) {
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a[i] * xd[j] + yd[j];
      }
    }
  }
  return(0);
}
int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  /* dotprods[i] = <x, Y[i]> for i = 0,...,nvec-1.
     Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* xd=NULL;
  realtype* yd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMP(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  /* initialize dot products */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* compute multiple dot products */
#pragma omp parallel default(none) private(i,j,yd,sum) shared(nvec,Y,N,xd,dotprods) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    for (i=0; i<nvec; i++) {
      yd  = NV_DATA_OMP(Y[i]);
      sum = ZERO;
      /* each thread sums its slice of the j-range into a private sum ... */
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        sum += xd[j] * yd[j];
      }
      /* ... and partial sums are merged into dotprods[i] under a critical;
         the implicit barrier at the end of the omp for keeps iterations of
         the i-loop in lockstep */
#pragma omp critical
      {
        dotprods[i] += sum;
      }
    }
  }
  return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
int N_VLinearSumVectorArray_OpenMP(int nvec,
                                   realtype a, N_Vector* X,
                                   realtype b, N_Vector* Y,
                                   N_Vector* Z)
{
  /* Z[i] = a*X[i] + b*Y[i] for i = 0,...,nvec-1.
     Dispatches to specialized kernels for common values of a and b, falling
     through to the general loop only when no special case applies.
     Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;
  realtype c;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMP(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMP(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMP(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMP(nvec, X, Y, Z));

  /* Cases:                    */
  /*   (1) a == 1.0, b = -1.0, */
  /*   (2) a == -1.0, b == 1.0 */
  /* NOTE: test records which of the two cases matched, so V1/V2 can swap
     the operand order accordingly (assignment inside the condition) */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMP(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /*   (1) a == 1.0, b == other or 0.0,                      */
  /*   (2) a == other or 0.0, b == 1.0                       */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /*   (1) a == -1.0, b != 1.0, */
  /*   (2) a != 1.0, b == -1.0  */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMP(nvec, c, V1, V2, Z));
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMP(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                               */
  /*   (1) a == other, b == 0.0 - user should have called N_VScale */
  /*   (2) a == 0.0, b == other - user should have called N_VScale */
  /*   (3) a,b == other, a !=b, a != -b                            */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /* compute linear sum for each vector pair in vector arrays */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a,b) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = a * xd[j] + b * yd[j];
      }
    }
  }
  return(0);
}
int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  /* Z[i] = c[i] * X[i] for i = 0,...,nvec-1 (in place when X == Z).
     Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype* xd=NULL;
  realtype* zd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMP(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /*
   * X[i] *= c[i]
   */
  if (X == Z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (i=0; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
        for (j=0; j<N; j++) {
          xd[j] *= c[i];
        }
      }
    }
    return(0);
  }

  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp parallel default(none) private(i,j,xd,zd) shared(nvec,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        zd[j] = c[i] * xd[j];
      }
    }
  }
  return(0);
}
int N_VConstVectorArray_OpenMP(int nvec, realtype c, N_Vector* Z)
{
  /* Set every component of each of the nvec vectors in Z to the constant c.
     Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype* zd;

  if (nvec < 1) return(-1);     /* invalid number of vectors */

  if (nvec == 1) {              /* single vector: defer to N_VConst */
    N_VConst_OpenMP(c, Z[0]);
    return(0);
  }

  N = NV_LENGTH_OMP(Z[0]);

#pragma omp parallel default(none) private(i,j,zd) shared(nvec,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (i = 0; i < nvec; i++) {
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j = 0; j < N; j++) {
        zd[j] = c;
      }
    }
  }

  return(0);
}
int N_VWrmsNormVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  /* nrm[i] = WRMS norm of X[i] with weight vector W[i], for i = 0,...,nvec-1.
     Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* wd=NULL;
  realtype* xd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMP(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMP(X[0]);

  /* initialize norms */
  for (i=0; i<nvec; i++) {
    nrm[i] = ZERO;
  }

  /* compute the WRMS norm for each vector in the vector array */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,nrm) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd  = NV_DATA_OMP(X[i]);
      wd  = NV_DATA_OMP(W[i]);
      sum = ZERO;
      /* per-thread partial sum over this thread's slice of the j-range */
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        sum += SUNSQR(xd[j] * wd[j]);
      }
      /* partial sums merged serially into nrm[i] */
#pragma omp critical
      {
        nrm[i] += sum;
      }
    }
  }

  /* finalize: nrm currently holds squared sums */
  for (i=0; i<nvec; i++) {
    nrm[i] = SUNRsqrt(nrm[i]/N);
  }
  return(0);
}
int N_VWrmsNormMaskVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W,
                                      N_Vector id, realtype* nrm)
{
  /* Masked WRMS norms: nrm[i] = WRMS norm of X[i] with weights W[i],
     restricted to entries where id[j] > 0. One shared mask vector id is
     applied to every X[i]. Returns 0 on success, -1 if nvec < 1. */
  int i;
  sunindextype j, N;
  realtype sum;
  realtype* wd=NULL;
  realtype* xd=NULL;
  realtype* idd=NULL;

  i = 0; /* initialize to suppress clang warning */
  j = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMP(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N   = NV_LENGTH_OMP(X[0]);
  idd = NV_DATA_OMP(id);

  /* initialize norms */
  for (i=0; i<nvec; i++) {
    nrm[i] = ZERO;
  }

  /* compute the WRMS norm for each vector in the vector array */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,idd,nrm) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd  = NV_DATA_OMP(X[i]);
      wd  = NV_DATA_OMP(W[i]);
      sum = ZERO;
      /* per-thread partial sum over masked entries */
#pragma omp for schedule(static)
      for (j=0; j<N; j++) {
        if (idd[j] > ZERO)
          sum += SUNSQR(xd[j] * wd[j]);
      }
      /* partial sums merged serially into nrm[i] */
#pragma omp critical
      {
        nrm[i] += sum;
      }
    }
  }

  /* finalize: nrm currently holds squared sums */
  for (i=0; i<nvec; i++) {
    nrm[i] = SUNRsqrt(nrm[i]/N);
  }
  return(0);
}
int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a,
                                       N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  /* For each vector i = 0,...,nvec-1 and each sum term j = 0,...,nsum-1:
       Z[j][i] = Y[j][i] + a[j] * X[i]   (in place when Y == Z)
     Returns 0 on success, -1 on invalid counts or allocation failure. */
  int i, j;
  sunindextype k, N;
  realtype* xd=NULL;
  realtype* yd=NULL;
  realtype* zd=NULL;

  int retval;
  N_Vector* YY;
  N_Vector* ZZ;

  i = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {

    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMP(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti */
    YY = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    /* FIX: check allocations before use (original dereferenced unchecked) */
    if (YY == NULL || ZZ == NULL) {
      free(YY);
      free(ZZ);
      return(-1);
    }

    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }

    retval = N_VScaleAddMulti_OpenMP(nsum, a, X[0], YY, ZZ);

    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMP(X[0]);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  if (Y == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (i=0; i<nvec; i++) {
        xd = NV_DATA_OMP(X[i]);
        for (j=0; j<nsum; j++) {
          yd = NV_DATA_OMP(Y[j][i]);
#pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            yd[k] += a[j] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp parallel default(none) private(i,j,k,xd,yd,zd) shared(nvec,nsum,X,Y,Z,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i=0; i<nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      for (j=0; j<nsum; j++) {
        yd = NV_DATA_OMP(Y[j][i]);
        zd = NV_DATA_OMP(Z[j][i]);
#pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] = a[j] * xd[k] + yd[k];
        }
      }
    }
  }
  return(0);
}
int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum,
                                           realtype* c,
                                           N_Vector** X,
                                           N_Vector* Z)
{
  /* For each vector j = 0,...,nvec-1:
       Z[j] = sum_{i=0}^{nsum-1} c[i] * X[i][j]
     Special-cases Z aliasing X[0] so the accumulation runs in place.
     Returns 0 on success, -1 on invalid counts or allocation failure. */
  int          i; /* vector arrays index in summation [0,nsum) */
  int          j; /* vector index in vector array     [0,nvec) */
  sunindextype k; /* element index in vector          [0,N)    */
  sunindextype N;
  realtype*    zd=NULL;
  realtype*    xd=NULL;

  realtype*    ctmp;
  N_Vector*    Y;

  i = 0; /* initialize to suppress clang warning */
  k = 0;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */
  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMP(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMP(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector*) malloc(nsum * sizeof(N_Vector));
    /* FIX: check allocation before use (original dereferenced unchecked) */
    if (Y == NULL) return(-1);

    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }

    N_VLinearCombination_OpenMP(nsum, c, Y, Z[0]);

    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    /* FIX: check allocation before use (original dereferenced unchecked) */
    if (ctmp == NULL) return(-1);

    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }

    N_VScaleVectorArray_OpenMP(nvec, ctmp, X[0], Z);

    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMP(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMP(Z[0]);

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (j=0; j<nvec; j++) {
        zd = NV_DATA_OMP(Z[j]);
        for (i=1; i<nsum; i++) {
          xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            zd[k] += c[i] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1
   */
  if (X[0] == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
    {
      for (j=0; j<nvec; j++) {
        zd = NV_DATA_OMP(Z[j]);
        /* scale the output vector in place by c[0] ... */
#pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] *= c[0];
        }
        /* ... then accumulate the remaining terms */
        for (i=1; i<nsum; i++) {
          xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
          for (k=0; k<N; k++) {
            zd[k] += c[i] * xd[k];
          }
        }
      }
    }
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1
   */
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(Z[0]))
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd = NV_DATA_OMP(X[0][j]);
      zd = NV_DATA_OMP(Z[j]);
#pragma omp for schedule(static)
      for (k=0; k<N; k++) {
        zd[k] = c[0] * xd[k];
      }
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
        for (k=0; k<N; k++) {
          zd[k] += c[i] * xd[k];
        }
      }
    }
  }
  return(0);
}
/*
* -----------------------------------------------------------------
* OPTIONAL XBraid interface operations
* -----------------------------------------------------------------
*/
int N_VBufSize_OpenMP(N_Vector x, sunindextype *size)
{
  /* Report the number of bytes needed to serialize x into a flat buffer. */
  if (x == NULL) {
    return(-1);
  }
  *size = ((sunindextype)sizeof(realtype)) * NV_LENGTH_OMP(x);
  return(0);
}
int N_VBufPack_OpenMP(N_Vector x, void *buf)
{
  /* Copy the data of x into the flat realtype buffer buf.
     Returns 0 on success, -1 if x or buf is NULL. */
  sunindextype i, N;
  realtype *xd = NULL;
  realtype *bd = NULL;

  if (x == NULL || buf == NULL) return(-1);

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  bd = (realtype*) buf;

  /* FIX: the original "#pragma omp for" was orphaned (no enclosing parallel
     region) and therefore ran single-threaded; use "parallel for" with the
     same clauses as the other kernels in this file */
#pragma omp parallel for default(none) private(i) shared(N,xd,bd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    bd[i] = xd[i];

  return(0);
}
int N_VBufUnpack_OpenMP(N_Vector x, void *buf)
{
  /* Copy the flat realtype buffer buf into the data of x.
     Returns 0 on success, -1 if x or buf is NULL. */
  sunindextype i, N;
  realtype *xd = NULL;
  realtype *bd = NULL;

  if (x == NULL || buf == NULL) return(-1);

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  bd = (realtype*) buf;

  /* FIX: the original "#pragma omp for" was orphaned (no enclosing parallel
     region) and therefore ran single-threaded; use "parallel for" with the
     same clauses as the other kernels in this file */
#pragma omp parallel for default(none) private(i) shared(N,xd,bd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    xd[i] = bd[i];

  return(0);
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
  /* Straight copy: z[i] = x[i]. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = xd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* Vector sum: z[i] = x[i] + y[i]. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = xd[i] + yd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* Vector difference: z[i] = x[i] - y[i]. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = xd[i] - yd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
  /* Negation: z[i] = -x[i]. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = -xd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  /* Scaled sum: z[i] = c * (x[i] + y[i]). */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = c * (xd[i] + yd[i]);
  }
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  /* Scaled difference: z[i] = c * (x[i] - y[i]). */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = c * (xd[i] - yd[i]);
  }
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  /* Linear form: z[i] = a*x[i] + y[i]. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = (a * xd[i]) + yd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  /* Linear form: z[i] = a*x[i] - y[i]. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = (a * xd[i]) - yd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
  /* axpy update y <- a*x + y, with fast paths for a == 1 and a == -1. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);

  if (a == ONE) {
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++) {
      yd[i] += xd[i];
    }
    return;
  }

  if (a == -ONE) {
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++) {
      yd[i] -= xd[i];
    }
    return;
  }

#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    yd[i] += a * xd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
  /* In-place scaling: x[i] *= a. */
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(i) shared(N,a,xd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    xd[i] *= a;
  }
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  /* Z[i] = X[i] + Y[i] for each of the nvec vector triples; returns 0. */
  int i;
  sunindextype j, N;
  realtype *xd, *yd, *zd;

  N = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (i = 0; i < nvec; i++) {
      xd = NV_DATA_OMP(X[i]);
      yd = NV_DATA_OMP(Y[i]);
      zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
      for (j = 0; j < N; j++) {
        zd[j] = xd[j] + yd[j];
      }
    }
  }

  return(0);
}
/* Z[v] = X[v] - Y[v] for each of the nvec vector triples */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, N;
  realtype* xa = NULL;
  realtype* ya = NULL;
  realtype* za = NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  N = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xa,ya,za) shared(nvec,X,Y,Z,N) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v = 0; v < nvec; v++) {
      xa = NV_DATA_OMP(X[v]);
      ya = NV_DATA_OMP(Y[v]);
      za = NV_DATA_OMP(Z[v]);
      /* worksharing loop splits each vector's entries across the team */
#pragma omp for schedule(static)
      for (k = 0; k < N; k++)
        za[k] = xa[k] - ya[k];
    }
  }

  return(0);
}
/* Z[v] = c * (X[v] + Y[v]) for each of the nvec vector triples */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, N;
  realtype* xa = NULL;
  realtype* ya = NULL;
  realtype* za = NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  N = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xa,ya,za) shared(nvec,X,Y,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v = 0; v < nvec; v++) {
      xa = NV_DATA_OMP(X[v]);
      ya = NV_DATA_OMP(Y[v]);
      za = NV_DATA_OMP(Z[v]);
      /* worksharing loop splits each vector's entries across the team */
#pragma omp for schedule(static)
      for (k = 0; k < N; k++)
        za[k] = c * (xa[k] + ya[k]);
    }
  }

  return(0);
}
/* Z[v] = c * (X[v] - Y[v]) for each of the nvec vector triples */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, N;
  realtype* xa = NULL;
  realtype* ya = NULL;
  realtype* za = NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  N = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xa,ya,za) shared(nvec,X,Y,Z,N,c) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v = 0; v < nvec; v++) {
      xa = NV_DATA_OMP(X[v]);
      ya = NV_DATA_OMP(Y[v]);
      za = NV_DATA_OMP(Z[v]);
      /* worksharing loop splits each vector's entries across the team */
#pragma omp for schedule(static)
      for (k = 0; k < N; k++)
        za[k] = c * (xa[k] - ya[k]);
    }
  }

  return(0);
}
/* Z[v] = a*X[v] + Y[v] for each of the nvec vector triples */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, N;
  realtype* xa = NULL;
  realtype* ya = NULL;
  realtype* za = NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  N = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xa,ya,za) shared(nvec,X,Y,Z,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v = 0; v < nvec; v++) {
      xa = NV_DATA_OMP(X[v]);
      ya = NV_DATA_OMP(Y[v]);
      za = NV_DATA_OMP(Z[v]);
      /* worksharing loop splits each vector's entries across the team */
#pragma omp for schedule(static)
      for (k = 0; k < N; k++)
        za[k] = (a * xa[k]) + ya[k];
    }
  }

  return(0);
}
/* Z[v] = a*X[v] - Y[v] for each of the nvec vector triples */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int v;
  sunindextype k, N;
  realtype* xa = NULL;
  realtype* ya = NULL;
  realtype* za = NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  N = NV_LENGTH_OMP(X[0]);

#pragma omp parallel default(none) private(v,k,xa,ya,za) shared(nvec,X,Y,Z,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
  {
    for (v = 0; v < nvec; v++) {
      xa = NV_DATA_OMP(X[v]);
      ya = NV_DATA_OMP(Y[v]);
      za = NV_DATA_OMP(Z[v]);
      /* worksharing loop splits each vector's entries across the team */
#pragma omp for schedule(static)
      for (k = 0; k < N; k++)
        za[k] = (a * xa[k]) - ya[k];
    }
  }

  return(0);
}
/* Y[v] <- Y[v] + a*X[v] for each vector pair, specialized for a == 1 and a == -1 */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int v;
  sunindextype k, N;
  realtype* xa = NULL;
  realtype* ya = NULL;

  v = 0; /* initialize to suppress clang warning */
  k = 0;

  N = NV_LENGTH_OMP(X[0]);

  if (a == ONE) {
#pragma omp parallel default(none) private(v,k,xa,ya) shared(nvec,X,Y,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (v = 0; v < nvec; v++) {
        xa = NV_DATA_OMP(X[v]);
        ya = NV_DATA_OMP(Y[v]);
#pragma omp for schedule(static)
        for (k = 0; k < N; k++)
          ya[k] += xa[k];
      }
    }
  } else if (a == -ONE) {
#pragma omp parallel default(none) private(v,k,xa,ya) shared(nvec,X,Y,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (v = 0; v < nvec; v++) {
        xa = NV_DATA_OMP(X[v]);
        ya = NV_DATA_OMP(Y[v]);
#pragma omp for schedule(static)
        for (k = 0; k < N; k++)
          ya[k] -= xa[k];
      }
    }
  } else {
    /* general scaled update */
#pragma omp parallel default(none) private(v,k,xa,ya) shared(nvec,X,Y,N,a) \
  num_threads(NV_NUM_THREADS_OMP(X[0]))
    {
      for (v = 0; v < nvec; v++) {
        xa = NV_DATA_OMP(X[v]);
        ya = NV_DATA_OMP(Y[v]);
#pragma omp for schedule(static)
        for (k = 0; k < N; k++)
          ya[k] += a * xa[k];
      }
    }
  }

  return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* Enable (tf true) or disable (tf false) all fused and vector array
 * operations on v by pointing the ops table at the OpenMP kernels or NULL. */
int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  /* fused vector operations */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;
  v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMP     : NULL;
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMP      : NULL;

  /* vector array operations */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMP         : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMP             : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMP             : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMP          : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMP      : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMP     : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the fused linear-combination operation on v */
int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the fused scale-add-multi operation on v */
int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the fused multi dot-product operation on v */
int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the linear-sum vector array operation on v */
int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the scale vector array operation on v */
int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the constant-fill vector array operation on v */
int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the WRMS-norm vector array operation on v */
int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the masked WRMS-norm vector array operation on v */
int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the scale-add-multi vector array operation on v */
int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
/* Toggle the linear-combination vector array operation on v */
int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf)
{
  /* the vector and its ops table must both exist */
  if (v == NULL) return(-1);
  if (v->ops == NULL) return(-1);

  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMP : NULL;

  /* return success */
  return(0);
}
|
GB_unop__identity_uint64_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_int8
// op(A') function: GB_unop_tran__identity_uint64_int8
// C type: uint64_t
// A type: int8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint64_t) Ax [p] for all p; identity op with int8_t -> uint64_t typecast
GrB_Info GB_unop_apply__identity_uint64_int8
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // typecast and apply the identity operator in one step
        Cx [p] = (uint64_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast/apply loop is instantiated from the
// shared GB_unop_transpose.c template using the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_uint64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_PHASE_2_OF_2 selects the template phase that writes the output values
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dropout-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu, Da Zheng, Hang Zhang
*/
#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"
#include "../tensor/elemwise_binary_broadcast_op.h"
#if defined(USE_MKL) && defined(_OPENMP) && !defined(__CUDACC__)
#define MXNET_USE_MKL_DROPOUT 1
#endif
#if MXNET_USE_MKL_DROPOUT
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // MXNET_USE_MKL_DROPOUT
#define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
} // namespace dropout
namespace mxnet {
namespace op {
const int MAX_DIM = 5;
/*! \brief Parameters for the Dropout operator. */
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;                         // fraction of the input dropped during training, in [0, 1]
  int mode;                        // dropout::kTraining or dropout::kAlways
  mxnet::TShape axes;              // axes over which one mask is shared (variational dropout)
  dmlc::optional<bool> cudnn_off;  // user request to bypass cuDNN (ignored when axes is set)
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, 0))
    .describe("Axes for variational dropout kernel.");
    DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(false))
    .describe("Whether to turn off cudnn in dropout operator. "
              "This option is ignored if axes is specified.");
  }
};  // struct DropoutParam
/*!
 * \brief Stateful dropout operator.
 *
 * Forward: when training (or in kAlways mode) and pkeep_ < 1, zeroes each
 * element with probability 1 - pkeep_ and scales survivors by 1/pkeep_
 * (inverted dropout); the mask is written to the kMask output so Backward
 * can reuse it. Implementation is chosen at runtime: MKL (CPU, when the
 * mask element is at least int-sized), cuDNN (GPU), or the generic RNG
 * kernel. When axes_ is non-empty one mask is generated and applied via
 * broadcast multiply (variational dropout). Otherwise the input is copied
 * through unchanged and dropout_passthrough_ records that for Backward.
 */
template<typename xpu, typename DType>
class DropoutOp {
#if MXNET_USE_MKL_DROPOUT
  /*!
   * \brief Fill r[0..n) with Bernoulli(p) draws using MKL VSL streams.
   * Each OMP thread creates a stream with the same seed and skips ahead to
   * its own chunk, so the result is deterministic for a given seed.
   */
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    // derive a bounded, non-negative seed from the parallel RNG
    const int seed = 17 + abs(genImpl.rand() % 4096);
    CHECK_GE(seed, 0);
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed);
        vslSkipAheadStream(stream, my_offset);  // position stream at this thread's chunk
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }
  static inline bool MKLAvailable() {
    // BernoulliGenerate expects an array of int, so for types smaller than int, the mask buffer
    // will be too small, so we can't use MKL in those cases
    return sizeof(DType) >= sizeof(int);
  }
  // MKL forward pass: mask blob is filled with int Bernoulli draws, then
  // out = data * mask * (1/pkeep_)
  inline void MKLForward(const OpContext &ctx,
                         const std::vector<TBlob> &in_data,
                         const std::vector<TBlob> &out_data) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
    CHECK_NOTNULL(pgen);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    DType *outptr = out.dptr_;
    DType *dataptr = data.dptr_;
    // mask buffer is reinterpreted as int storage for the Bernoulli draws
    auto maskptr = reinterpret_cast<int *>(mask.dptr_);
    int count = mask.shape_[0] * mask.shape_[1];
    BernoulliGenerate(*pgen, count, this->pkeep_, maskptr);
    const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      outptr[i] = dataptr[i] * maskptr[i] * pk_1;
    }
  }
  // MKL backward pass: gdata = grad * mask * (1/pkeep_), reusing the forward mask
  inline void MKLBackward(const OpContext &ctx,
                          const std::vector<TBlob> &in_grad,
                          const std::vector<TBlob> &out_data,
                          const std::vector<TBlob> &out_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
    DType *ingradptr = gdata.dptr_;
    const DType *outgradptr = grad.dptr_;
    auto maskptr = reinterpret_cast<int *>(mask.dptr_);
    int count = mask.shape_[0] * mask.shape_[1];
    const float pk_1 = 1.0f / this->pkeep_;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (int i = 0; i < count; ++i) {
      ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
    }
  }
#endif  // #if MXNET_USE_MKL_DROPOUT

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // mask is 1/pkeep when the draw is kept, 0 otherwise (inverted dropout)
        mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };
  struct BernoulliKernel {
    /*! \brief Bernoulli kernel for generating mask */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *mask_out,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
      });
    }
  };
  explicit DropoutOp(const DropoutParam &param, Context ctx) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
    this->axes_ = param.axes;
    this->dropout_passthrough_ = true;
#if MXNET_USE_CUDNN_DROPOUT
    this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value();
    this->ctx_ = ctx;
    // cuDNN descriptors are created only when they will actually be used;
    // the destructor mirrors this condition
    if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      dtype_ = mshadow::DataType<DType>::kCudnnFlag;
      CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_));
      CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_));
      CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }
  ~DropoutOp() {
#if MXNET_USE_CUDNN_DROPOUT
    // destroy only descriptors the constructor created (same condition)
    if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) {
      CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_));
      CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_));
      CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_));
    }
#endif  // MXNET_USE_CUDNN_DROPOUT
  }
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
  inline bool CuDNNAvailable() {
    return this->pkeep_ > 0 && !this->cudnn_off_;
  }
  inline void CuDNNForward(const OpContext &ctx,
                           const TBlob &in,
                           const TBlob &mask,
                           const TBlob &out) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // set dropout state.
    ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_);
    // describe input/output tensor as a flat 1 x 1 x 1 x Size() layout
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = out.Size();
    stride[0] = out.Size();
    stride[1] = out.Size();
    stride[2] = out.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
    // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
    // 1/8 of input size.
    CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) <<
      "The size of the mask space is smaller than the required cudnn reserved space.";
    CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                   dropout_desc_,
                                   x_desc_,
                                   in.dptr<DType>(),
                                   y_desc_,
                                   out.dptr<DType>(),
                                   mask.dptr<DType>(),
                                   dropout_reserve_byte_));
  }
  inline void CuDNNBackward(const OpContext &ctx,
                            const TBlob &out_grad,
                            const TBlob &mask,
                            const TBlob &in_grad) {
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // describe input/output tensor
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = in_grad.Size();
    stride[0] = in_grad.Size();
    stride[1] = in_grad.Size();
    stride[2] = in_grad.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_,
                                          dtype_,
                                          4,
                                          dim,
                                          stride));
    // perform dropout with cudnn, reusing the reserve space from the forward pass
    CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
                                    dropout_desc_,
                                    dy_desc_,
                                    out_grad.dptr<DType>(),
                                    dx_desc_,
                                    in_grad.dptr<DType>(),
                                    mask.dptr<DType>(),
                                    dropout_reserve_byte_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
  /*!
   * \brief Forward pass: writes the dropout output (and the mask when dropout
   * is active). Falls back to an identity copy when not training (and mode is
   * not kAlways) or when pkeep_ == 1.
   */
  void Forward(const OpContext &ctx,
               const std::vector<TBlob> &in_data,
               const std::vector<OpReqType> &req,
               const std::vector<TBlob> &out_data) {
    this->dropout_passthrough_ = true;
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &in = in_data[dropout::kData];
      const TBlob &out = out_data[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
        this->dropout_passthrough_ = false;
        if (this->axes_.ndim() == 0) {
          // element-wise dropout: prefer MKL, then cuDNN, else generic RNG kernel
#if MXNET_USE_MKL_DROPOUT
          if (MKLAvailable()) {
            MKLForward(ctx, in_data, out_data);
            return;
          }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          if (CuDNNAvailable()) {
            CuDNNForward(ctx, in, mask, out);
            return;
          }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in.dptr<DType>(),
                                        this->pkeep_);
          return;
        } else {
          // variational dropout: one mask shared along axes_, applied by broadcast
          RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          // initialize the mask
          LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(),
                                          mask.dptr<DType>(),
                                          this->pkeep_);
          // broadcast mul
          mxnet::TShape new_lshape, new_rshape, new_oshape;
          int ndim = BinaryBroadcastShapeCompact(in.shape_,
                                                 mask.shape_, out.shape_,
                                                 &new_lshape, &new_rshape, &new_oshape);
          if (!ndim) {
            // shapes already match: plain element-wise multiply
            MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
              mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                s, out.Size(), out.dptr<DType>(), in.dptr<DType>(),
                mask.dptr<DType>());
            });
          } else {
            BROADCAST_NDIM_SWITCH(ndim, NDim, {
              mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
              mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
              mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
              mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
                                                                 mshadow_op::mul>, xpu>::
              template LaunchEx(s, new_oshape.Size(), req[dropout::kOut],
                                lstride, rstride, oshape,
                                in.dptr<DType>(),
                                mask.dptr<DType>(), out.dptr<DType>());
            });
          }
        }
      } else {
        // pass-through: copy input to output unless it is already in place
        if (req[dropout::kOut] == kWriteInplace) return;
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
        });
      }
    }
  }
  /*!
   * \brief Backward pass: gdata = grad * mask when dropout was applied in the
   * preceding Forward; identity otherwise (tracked by dropout_passthrough_).
   */
  void Backward(const OpContext &ctx,
                const std::vector<TBlob> &out_grad,
                const std::vector<TBlob> &out_data,
                const std::vector<OpReqType> &req,
                const std::vector<TBlob> &in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    if (!this->dropout_passthrough_) {
      this->dropout_passthrough_ = true;
      const TBlob &gdata = in_grad[dropout::kData];
      const TBlob &grad = out_grad[dropout::kOut];
      const TBlob &mask = out_data[dropout::kMask];
      if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
        if (MKLAvailable()) {
          MKLBackward(ctx, in_grad, out_data, out_grad);
          return;
        }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        if (CuDNNAvailable()) {
          CuDNNBackward(ctx, grad, mask, gdata);
          return;
        }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        // standard case for dropout
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
        return;
      } else {
        // broadcast mul
        mxnet::TShape new_lshape, new_rshape, new_oshape;
        int ndim = BinaryBroadcastShapeCompact(grad.shape_,
                                               mask.shape_, gdata.shape_,
                                               &new_lshape, &new_rshape, &new_oshape);
        if (!ndim) {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
              s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
          });
        } else {
          BROADCAST_NDIM_SWITCH(ndim, NDim, {
            mshadow::Shape<NDim> oshape = new_oshape.get<NDim>();
            mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
            mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
            mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType,
                                                               mshadow_op::mul>, xpu>::
            template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape,
                              grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>());
          });
        }
      }
    } else {
      // forward ran in pass-through mode, so the gradient is the identity map
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
          s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
      });
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
  mxnet::TShape axes_;
  /*! \brief Flag to record whether forward is executed in pass-through mode */
  bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
  bool cudnn_off_;                      // user disabled cuDNN via param
  Context ctx_;
  cudnnDataType_t dtype_;
  cudnnDropoutDescriptor_t dropout_desc_;
  uint64_t seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  size_t dropout_reserve_byte_;
  cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp
// Stateful forward entry point: dispatch on the input's real dtype and run
// the cached DropoutOp's Forward.
template<typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}
// Stateful backward entry point: repackage inputs (inputs[0] = output grad,
// inputs[1] = mask from forward) into the slot layout Backward expects and
// dispatch on the gradient's real dtype.
template<typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  // use unsigned literals consistently; size() is unsigned, so comparing
  // against plain 1 mixes signedness (and was inconsistent with the 2U above)
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];   // gradient w.r.t. the dropout output
  out_data[dropout::kMask] = inputs[1];   // mask saved by the forward pass
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}
} // namespace op
} // namespace mxnet
#undef MXNET_USE_MKL_DROPOUT
#endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const ChannelType channel,const DrawInfo *draw_info,
% const MagickPixelPacket target,const ssize_t x_offset,
% const ssize_t y_offset,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
  /*
    Iterative scanline flood fill.  Pass 1 walks a segment stack and marks
    every reachable matching pixel by making it transparent in a cloned
    "floodplane" image; pass 2 composites the fill color onto each marked
    pixel of the real image.  No recursion: segments are pushed on an
    explicit fixed-size stack and an overflow throws rather than corrupts.
  */
#define MaxStacksize 524288UL
/*
  Push one horizontal segment: y1 = row, x1..x2 = column span, y2 = the
  vertical direction (+1/-1) to scan next.  Segments whose next row falls
  outside the image are silently dropped.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    A translucent fill needs an alpha channel on the target image.
  */
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The floodplane clone starts fully opaque;
    "visited" pixels are flagged by setting their opacity transparent.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack: seed point scanning down, and the row
    below it scanning back up.
  */
  exception=(&image->exception);
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first scan left from x1 toward column 0,
      marking matches in the floodplane, then scan right below.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /* transparent in the floodplane == already visited */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip == the very first pixel (x1) did not match */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              /*
                Scan right from x until a non-matching or visited pixel.
              */
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
                break;
            }
          /* continue in the same direction, and turn around past x2 */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Skip over the non-matching gap up to x2 to find the next run.
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane: every pixel marked transparent in
      pass 1 receives the (possibly patterned) fill on the selected channels.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          /* fill color may vary per-pixel (tile/pattern fills) */
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  /* MagickTrue only if the tiling pass completed every row */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelPacket *start_color,
% const PixelPacket *stop_color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% This provides a good example of making use of the DrawGradientImage
% function and the gradient structure in draw_info.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  /*
    Build a two-stop gradient description in a DrawInfo and render it with
    DrawGradientImage().  The gradient geometry (vector, center, radii,
    angle, extent) may be overridden through "gradient:*" image artifacts.

    Returns MagickTrue on success; throws on stop-allocation failure.
  */
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /* default vector: top-left to bottom-right */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /*
        A gravity names the *destination* of the gradient; set the vector
        to run from the opposite edge/corner toward it.
      */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Bug fix: a south (top-to-bottom) gradient ends at the bottom
            row; this previously used image->columns-1, which skews the
            vertical vector for non-square images.
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    With no explicit geometry hints, a plain linear gradient defaults to
    vertical (x2 forced to 0 when a vertical component exists).
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1)*cosine)+
        fabs((double) (image->rows-1)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) (MagickMax((image->columns-1),
            (image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1)*
            (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1)/2.0;
          gradient->radii.y=(double) (image->rows-1)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) MagickMin((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release a per-thread histogram set allocated by
  AcquireHistogramThreadSet(); tolerates partially-populated sets (NULL
  slots) so it can run on an allocation-failure path.  Returns NULL.
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(histogram != (size_t **) NULL);
  /*
    Hoist the thread-count query out of the loop: GetMagickResourceLimit()
    is loop-invariant here and was previously re-evaluated every iteration.
  */
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (histogram[i] != (size_t *) NULL)
      histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}
static size_t **AcquireHistogramThreadSet(const size_t count)
{
register ssize_t
i;
size_t
**histogram,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
histogram=(size_t **) AcquireQuantumMemory(number_threads,
sizeof(*histogram));
if (histogram == (size_t **) NULL)
return((size_t **) NULL);
(void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
for (i=0; i < (ssize_t) number_threads; i++)
{
histogram[i]=(size_t *) AcquireQuantumMemory(count,
sizeof(**histogram));
if (histogram[i] == (size_t *) NULL)
return(DestroyHistogramThreadSet(histogram));
}
return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Oil-paint effect: each output pixel takes the color of the most frequent
    intensity bin within a width x width neighborhood of the input.  Rows are
    processed in parallel, each thread using its own 256-bin histogram.
    Returns a new image (caller owns it) or NULL on failure.
  */
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **magick_restrict histograms,
    width;

  ssize_t
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,0.5);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);  /* bug fix: result was
          previously assigned to linear_image, leaving paint_image dangling */
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&paint_image->exception);
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict paint_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    register size_t
      *histogram;

    if (status == MagickFalse)
      continue;
    /*
      Read a (columns+width) x width band centered on row y; the virtual
      cache view supplies edge pixels for out-of-bounds coordinates.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        v;

      /*
        Assign most frequent color: i walks rows of the band, j remembers
        the offset of the winning pixel, k is its intensity bin.
      */
      i=0;
      j=0;
      count=0;
      (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+u+i)));
          histogram[k]++;
          if (histogram[k] > count)
            {
              j=i+u;
              count=histogram[k];
            }
        }
        i+=(ssize_t) (linear_image->columns+width);
      }
      *q=(*(p+j));
      if (linear_image->colorspace == CMYKColorspace)
        SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OilPaintImage)
#endif
        proceed=SetImageProgress(image,OilPaintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);  /* DestroyImage returns NULL */
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel(s).
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  /*
    Convenience wrapper: recolor matching pixels on all composite channels.
  */
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}
MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
  /*
    Replace, on the selected channels, every pixel similar to `target`
    (within the image fuzz) with `fill`; `invert` flips the match sense.
    Rows are processed in parallel; returns MagickFalse if any row fails
    or the progress monitor aborts.
  */
#define OpaquePaintImageTag "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* normalize fill/target to this image's colorspace and matte settings */
  ConformMagickPixelPacket(image,fill,&conform_fill,exception);
  ConformMagickPixelPacket(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,&conform_target) != invert)
        {
          /* only the requested channels are overwritten */
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(conform_fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(conform_fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(conform_fill.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines
% how much tolerance is acceptable to consider two colors as the same.
% For example, set fuzz to 10 and the color red at intensities of 100 and
% 102 respectively are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const MagickPixelPacket *target,const Quantum opacity,
% const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
  /*
    Set the opacity of every pixel similar to `target` (within the image
    fuzz) to `opacity`; `invert` flips the match sense.  Rows are processed
    in parallel; returns MagickFalse on any row failure or monitor abort.
  */
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* an alpha channel is required before opacities can be written */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
%  As there is a single fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of each color component (RGB) can be
%  different.  Thus this method takes two target pixels (one low and one
%  high) and makes transparent all pixels of the image that lie between
%  these two pixels.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
  /*
    Chroma-key variant of TransparentPaintImage(): a pixel matches when each
    of its red, green, and blue components lies within [low,high] for that
    component (per-channel tolerance instead of a single fuzz value).
    Matching pixels (or non-matching ones, when `invert`) get `opacity`.
  */
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    NOTE(review): this uses ResetAlphaChannel where TransparentPaintImage()
    uses OpaqueAlphaChannel — presumably intentional; confirm against the
    alpha-channel semantics expected by callers.
  */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* per-channel range test; opacity and index are not compared */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue : MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
convolution_1x1_pack4to8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack4to8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x4_t _v0 = vld1_f16(r0);
float16x4_t _v1 = vld1_f16(r0 + 8);
float16x4_t _v2 = vld1_f16(r0 + 16);
float16x4_t _v3 = vld1_f16(r0 + 24);
vst1_f16(outptr, _v0);
vst1_f16(outptr + 4, _v1);
vst1_f16(outptr + 8, _v2);
vst1_f16(outptr + 12, _v3);
r0 += 32;
outptr += 16;
}
for (; j + 1 < outw; j += 2)
{
float16x4_t _v0 = vld1_f16(r0);
float16x4_t _v1 = vld1_f16(r0 + 8);
vst1_f16(outptr, _v0);
vst1_f16(outptr + 4, _v1);
r0 += 16;
outptr += 8;
}
for (; j < outw; j++)
{
float16x4_t _v = vld1_f16(r0);
vst1_f16(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
exemplo_for.c | #include "exemplos.h"
//exemplo For
/* OpenMP "for" example: each thread reports the first iteration index it
 * receives from the shared loop and how many iterations it executed. */
int main(int argc, char **argv)
{
    int i, tid, count;

    /* tid/count are per-thread scratch; i is the shared loop variable
     * (implicitly private inside the omp for). */
#pragma omp parallel private(tid, count)
    {
        count = 0;
        tid = omp_get_thread_num();

#pragma omp for
        for (i = 0; i < 1000; ++i) {
            if (count == 0)
                printf("Thread %d started with i=%d\n", tid, i);
            ++count;
        }

        tid = omp_get_thread_num();
        printf("Thread %d performed %d iterations of the loop.\n",
               tid, count);
    }

    return 0;
}
compression.h | //
// Created by Bangtian Liu on 6/30/19.
//
#ifndef PROJECT_COMPRESSION_H
#define PROJECT_COMPRESSION_H
#include <vector>
#include <algorithm>
#include <random>
#include <cstring>
#include "../sympiler/HMatrix.h"
#include "HTree.h"
#include "../sympiler/HTree.h"
using namespace Sympiler::Internal;
using namespace Sympiler;
// Result of skeletonizing one tree node via interpolative decomposition.
struct cretvalue {
int *skels; // heap-allocated ids of the selected skeleton points
int skels_length; // number of skeleton points (the rank s)
double *proj; // column-major s x proj_column interpolation matrix
int proj_column; // number of columns of proj (candidate-set size)
};
// Per-partition cost record used by the greedy load balancer (binpacking).
struct DDcost {
int index; // index of the partition in wpartitions
unsigned long cost; // estimated flop cost of the partition
};
bool fcompare(DDcost lhs, DDcost rhs) {
return lhs.cost > rhs.cost;
}
typedef cretvalue ret; // shorthand used throughout the skeletonization routines
using namespace std; // NOTE(review): using-directive in a header leaks into every includer
void Fsubmatrix(std::vector<int> &amap, std::vector<int> &bmap, double *submatrix, double *X, int n, int d,
Internal::Ktype ktype,
double h) {
switch (ktype) {
case Internal::KS_GAUSSIAN: {
double *source = (double *) mkl_malloc(sizeof(double) * bmap.size() * d, 64);
double *target = (double *) mkl_malloc(sizeof(double) * amap.size() * d, 64);
#pragma omp parallel for
for (int i = 0; i < bmap.size(); i++) {
for (int j = 0; j < d; j++) {
source[i * d + j] = X[bmap[i] * d + j];
}
}
#pragma omp parallel for
for (int i = 0; i < amap.size(); i++) {
for (int j = 0; j < d; j++) {
target[i * d + j] = X[amap[i] * d + j];
}
}
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
amap.size(), bmap.size(), d, -2.0,
target, d,
source, d, 0.0,
submatrix, amap.size());
double *target_sqnorms = (double *) mkl_malloc(sizeof(double) * amap.size(), 64);
double *source_sqnorms = (double *) mkl_malloc(sizeof(double) * bmap.size(), 64);
#pragma omp parallel for
for (int i = 0; i < amap.size(); i++) {
target_sqnorms[i] = cblas_ddot(d,
target + i * d, 1,
target + i * d, 1);
}
for (int i = 0; i < bmap.size(); i++) {
source_sqnorms[i] = cblas_ddot(d,
source + i * d, 1,
source + i * d, 1);
}
#pragma omp parallel for
for (int j = 0; j < bmap.size(); j++) {
for (int i = 0; i < amap.size(); i++)
submatrix[j * amap.size() + i] += target_sqnorms[i] + source_sqnorms[j];
}
double kscal = -0.5 / (h * h);
#pragma omp parallel for
for (int i = 0; i < amap.size() * bmap.size(); i++) {
submatrix[i] = std::exp(kscal * submatrix[i]);
}
break;
}
case Internal::KS_LOG: {
//#pragma omp parallel for
// for (int i = 0; i < amap.size() * bmap.size(); i++) {
// submatrix[i] = -0.5 * log(submatrix[i]);
// }
break;
}
case Internal::KS_EXPONENTIAL: {
//#pragma omp parallel for
// for (int i = 0; i < amap.size() * bmap.size(); i++) {
// submatrix[i] = exp(-sqrt(submatrix[i]));;
// }
break;
}
case Internal::KS_NEWTON: {
#pragma omp parallel for
for (int j = 0; j < bmap.size(); j++) {
for (int i = 0; i < amap.size(); i++) {
auto Kij = 0.0;
for (int k = 0; k < d; ++k) {
auto col = bmap[j];
auto row = amap[i];
auto tar = X[col * d + k];
auto src = X[row * d + k];
Kij += (tar - src) * (tar - src);
}
if(Kij==0)Kij=1;
submatrix[j * amap.size() + i] = 1/sqrt(Kij);
}
}
//#pragma omp parallel for
// for (int i = 0; i < amap.size() * bmap.size(); i++) {
// if(submatrix[i]==0) submatrix[i] = 1;
// else submatrix[i] = 1/std::sqrt(submatrix[i]);
// }
break;
}
default: {
printf("invalid kernel type\n");
exit(1);
break;
}
}
}
//
//void DDDFsubmatrix(std::vector<int> &amap, std::vector<int> &bmap, double *submatrix, double *X, int n, int d, Internal::Ktype ktype,
// double h)
//{
//
// double *source = (double *)mkl_malloc(sizeof(double)*bmap.size()*d, 64);
// double *target = (double *)mkl_malloc(sizeof(double)*amap.size()*d, 64);
//
//#pragma omp parallel for
// for(int i=0; i<bmap.size(); i++)
// {
// for(int j=0; j<d; j++)
// {
// source[i*d + j] = X[bmap[i]*d+j];
// }
// }
//
//#pragma omp parallel for
// for(int i=0; i<amap.size(); i++) {
// for (int j = 0; j < d; j++) {
// target[i*d + j] = X[amap[i]*d+j];
// }
// }
//
// cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
// amap.size(), bmap.size(), d, -2.0,
// target, d,
// source, d, 0.0,
// submatrix, amap.size());
//
// double *target_sqnorms = (double *)mkl_malloc(sizeof(double)*amap.size(), 64);
// double *source_sqnorms = (double *)mkl_malloc(sizeof(double)*bmap.size(), 64);
//
//#pragma omp parallel for
// for(int i=0; i<amap.size(); i++)
// {
// target_sqnorms[i] = cblas_ddot(d,
// target+i*d, 1,
// target+i*d, 1);
// }
//
// for(int i=0; i<bmap.size(); i++)
// {
// source_sqnorms[i] = cblas_ddot(d,
// source+i*d,1,
// source+i*d,1);
// }
//#pragma omp parallel for
// for(int j=0; j<bmap.size(); j++)
// {
// for(int i=0; i<amap.size(); i++)
// submatrix[j*amap.size() + i] += target_sqnorms[i] + source_sqnorms[j];
// }
//
// switch (ktype) {
// case Internal::KS_GAUSSIAN: {
//
// double kscal = -0.5/(h * h);
//#pragma omp parallel for
// for(int i=0; i<amap.size()*bmap.size(); i++)
// {
// submatrix[i] = 1.0;
// }
//
// break;
// }
//
// case Internal::KS_LOG: {
//#pragma omp parallel for
// for(int i=0; i<amap.size()*bmap.size(); i++)
// {
// submatrix[i] = -0.5 * log(submatrix[i]);
// }
//
// break;
// }
//
// case Internal::KS_EXPONENTIAL: {
//#pragma omp parallel for
// for(int i=0; i<amap.size()*bmap.size(); i++)
// {
// submatrix[i] = exp(-sqrt(submatrix[i]));;
// }
// break;
// }
//
// case Internal::KS_NEWTON: {
//
//#pragma omp parallel for
// for(int i=0; i<amap.size()*bmap.size(); i++)
// {
// submatrix[i] = std::sqrt(submatrix[i]);
// }
//
// break;
// }
//
// default: {
// printf("invalid kernel type\n");
// exit(1);
// break;
// }
// }
//
//}
void Fsubmatrix(int *amap, int lena, int *bmap, int lenb, double *submatrix, Internal::Ktype ktype, double *X, int d,
double h) {
switch (ktype) {
case Internal::KS_GAUSSIAN: {
double *source = (double *) mkl_malloc(sizeof(double) * lenb * d, 64);
double *target = (double *) mkl_malloc(sizeof(double) * lena * d, 64);
#pragma omp parallel for
for (int i = 0; i < lenb; i++) {
for (int j = 0; j < d; j++) {
source[i * d + j] = X[bmap[i] * d + j];
}
}
#pragma omp parallel for
for (int i = 0; i < lena; i++) {
for (int j = 0; j < d; j++) {
target[i * d + j] = X[amap[i] * d + j];
}
}
cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans,
lena, lenb, d, -2.0,
target, d,
source, d, 0.0,
submatrix, lena);
double *target_sqnorms = (double *) mkl_malloc(sizeof(double) * lena, 64);
double *source_sqnorms = (double *) mkl_malloc(sizeof(double) * lenb, 64);
#pragma omp parallel for
for (int i = 0; i < lena; i++) {
target_sqnorms[i] = cblas_ddot(d,
target + i * d, 1,
target + i * d, 1);
}
#pragma omp parallel for
for (int i = 0; i < lenb; i++) {
source_sqnorms[i] = cblas_ddot(d,
source + i * d, 1,
source + i * d, 1);
}
#pragma omp parallel for
for (int j = 0; j < lenb; j++) {
for (int i = 0; i < lena; i++)
submatrix[j * lena + i] += target_sqnorms[i] + source_sqnorms[j];
}
double kscal = -0.5 / (h * h);
#pragma omp parallel for
for (int i = 0; i < lena * lenb; i++) {
submatrix[i] = std::exp(kscal * submatrix[i]);
}
break;
}
case Internal::KS_LOG: {
//#pragma omp parallel for
// for (int i = 0; i < lena * lenb; i++) {
// submatrix[i] = -0.5 * log(submatrix[i]);
// }
break;
}
case Internal::KS_EXPONENTIAL: {
//#pragma omp parallel for
// for (int i = 0; i < lena * lenb; i++) {
// submatrix[i] = exp(-sqrt(submatrix[i]));;
// }
// break;
}
case Internal::KS_NEWTON: {
#pragma omp parallel for
for (int j = 0; j < lenb; j++) {
for (int i = 0; i < lena; i++) {
auto Kij = 0.0;
for (int k = 0; k < d; ++k) {
auto col = bmap[j];
auto row = amap[i];
auto tar = X[col * d + k];
auto src = X[row * d + k];
Kij += (tar - src) * (tar - src);
}
if(Kij==0)Kij=1;
submatrix[j * lena + i] = 1/sqrt(Kij);
}
}
break;
}
default: {
printf("invalid kernel type\n");
exit(1);
break;
}
}
}
// Build the pruning-neighbor set (pnids) and sampling-neighbor set (snids)
// for leaf node `idx` from the precomputed k-NN lists in tree.NN (k = 32).
void BuildNeighBorsLeaf(HTree &tree, int idx, int nsamples) {
auto &pnids = tree.pnids[idx];
auto &snids = tree.snids[idx];
int k = 32; // neighbors stored per point in tree.NN
auto &NN = tree.NN;
auto lids = tree.lids + tree.lidsoffset[idx]; // point ids owned by this node
int n = tree.lidslen[idx];
// printf("len=%d\n",n);
// pnids: ids appearing among the nearest k/2 neighbors of any owned point.
pnids = std::unordered_set<int>(); // will modify
for (int ii = 0; ii < k / 2; ii++) {
for (int jj = 0; jj < n; jj++) {
auto idx = NN[lids[jj] * k + ii].second; // NOTE: shadows parameter `idx`
pnids.insert(idx);
// printf("%lu;",NN[ lids[jj] * k + ii].second);
}
}
// The node's own points are not pruning candidates.
for (int i = 0; i < n; i++) {
pnids.erase(lids[i]);
}
// printf("Leaf Size of pruning neighbor set: %lu \n", pnids.size());
// snids: up to `nsamples` closest candidates drawn from the farther half of
// each point's neighbor list, excluding owned points and pruning neighbors.
snids = std::map<int, double>();
std::vector<std::pair<double, int>> tmp(k / 2 * n);
std::set<int> nodeIdx(lids, lids + n);
// Allocate array for sorting
for (int ii = (k + 1) / 2; ii < k; ii++) {
for (int jj = 0; jj < n; jj++) {
tmp[(ii - (k + 1) / 2) * n + jj] = NN[lids[jj] * k + ii];
}
}
std::sort(tmp.begin(), tmp.end()); // ascending by distance
int i = 0;
while (snids.size() < nsamples && i < (k - 1) * n / 2) {
if (!pnids.count(tmp[i].second) && !nodeIdx.count(tmp[i].second)) {
snids.insert(std::pair<int, double>(tmp[i].second, tmp[i].first));
}
i++;
}
}
// Build the sampling-neighbor set (snids) of internal node `idx` by merging
// the children's sets — keeping the smaller distance for duplicate ids —
// then removing the node's own points and the children's pruning neighbors.
// NOTE(review): the `nsamples` parameter is unused here — confirm intended.
void BuildNeighBorsInternal(HTree &tree, int idx, int nsamples) {
auto &pnids = tree.pnids[idx]; // unused here; kept for symmetry with leaf variant
auto &snids = tree.snids[idx];
int k = 32; // unused here; kept for symmetry with leaf variant
auto &NN = tree.NN;
auto lids = tree.lids + tree.lidsoffset[idx]; // point ids owned by this node
int n = tree.lidslen[idx];
auto &lsnids = tree.snids[tree.tlchildren[idx]];
auto &rsnids = tree.snids[tree.trchildren[idx]];
auto &lpnids = tree.pnids[tree.tlchildren[idx]];
auto &rpnids = tree.pnids[tree.trchildren[idx]];
// Start from the left child's set, then merge the right child's.
snids = lsnids;
for (auto cur = rsnids.begin(); cur != rsnids.end(); cur++) {
auto ret = snids.insert(*cur);
if (ret.second == false) {
// Update distance?
if (ret.first->second > (*cur).first) {
ret.first->second = (*cur).first;
}
}
}
// Remove "own" points
for (int i = 0; i < n; i++) {
snids.erase(lids[i]);
}
// Remove pruning neighbors from left and right
for (auto cur = lpnids.begin(); cur != lpnids.end(); cur++) {
snids.erase(*cur);
}
for (auto cur = rpnids.begin(); cur != rpnids.end(); cur++) {
snids.erase(*cur);
}
}
int decomposition(double *A, int nRows, int nCols, double tolerance, int **skels, double **proj, int **jpvt) {
assert(nRows > nCols);
int s;
int maxRank = 256;
// printf("maxRank=%d\n",maxRank);
*jpvt = (int *) malloc(sizeof(int) * nCols);
memset(*jpvt, 0, sizeof(int) * nCols);
// T *tau = GenMatrix<T>(std::min(nRows,nCols),1);
double *tau = (double *) mkl_malloc(sizeof(double) * std::min(nRows, nCols), 64);
auto info = LAPACKE_dgeqp3(LAPACK_COL_MAJOR, nRows, nCols, A, nRows, *jpvt, tau);
if (info != 0) {
printf("%d-th parameter had an illegal value", -info);
}
#pragma omp parallel for
for (int i = 0; i < nCols; ++i) {
(*jpvt)[i] = (*jpvt)[i] - 1;
}
for (s = 1; s < nCols; ++s) {
// printf("s=%d, a=%e, error=%e nCOls=%d\n",s, A[s*nRows+s],tolerance,nCols);
if (s > maxRank || std::abs(A[s * nRows + s]/A[0]) < tolerance) break;
}
// if(!setup.adaptive)
// {
// s = std::min(maxRank, nCols);
// }
if (s > maxRank) s = maxRank;
*skels = (int *) malloc(sizeof(int) * s);
memcpy(*skels, *jpvt, sizeof(int) * s);
// memcpy(*skels,*proj, sizeof(int)*s);
// *proj = GenMatrix<T>(s,nCols);
*proj = (double *) mkl_malloc(sizeof(double) * s * nCols, 64);
memset(*proj, 0, sizeof(double) * s * nCols);
//#pragma omp parallel for
for (int j = 0; j < nCols; j++) {
for (int i = 0; i < s; i++) {
if (j < s) {
if (j >= i) (*proj)[j * s + i] = A[j * nRows + i];
else (*proj)[j * s + i] = 0.0;
} else {
(*proj)[j * s + i] = A[j * nRows + i];
}
}
}
if ((*proj)[0] == 0) return s; // put on here
double *R1 = (double *) mkl_malloc(sizeof(double) * s * s, 64);
memset(R1, 0, sizeof(double) * s * s); // todo check the segment fault bug here
//#pragma omp parallel for
for (int j = 0; j < s; j++) {
for (int i = 0; i < s; i++) {
if (i <= j) R1[j * s + i] = (*proj)[j * s + i];
// if((*proj)[j*s+i]!=(*proj)[j*s+i])printf("NAN FOUND1!!!\n");
}
}
// T *tmp = GenMatrix<T>(s,nCols);
double *tmp = (double *) mkl_malloc(sizeof(double) * s * nCols, 64);
memcpy(tmp, *proj, sizeof(double) * s * nCols);
cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit, s, nCols, 1.0, R1, s, tmp, s);
/** Fill in proj */
for (int j = 0; j < nCols; j++) {
for (int i = 0; i < s; i++) {
(*proj)[(*jpvt)[j] * s + i] = tmp[j * s + i];
}
}
return s;
}
// Skeletonize a leaf node using precomputed sample ids (sid arrays).
// Candidate columns (bmap) are the node's own points; sampling rows (amap)
// come from the provided samples topped up with random far-field points.
void skeletonize_leaf(int idx, int *lids, int *lidslen, int *lidsoffset, ret *rtmp, int *sid, int *sidlen,
                      int *sidoffset, int *lc, int *rc, int m, int n, double *X, Internal::Ktype ktype, int dim,
                      double h,
                      double acc) {
    std::vector<int> bmap;
    bmap.insert(bmap.end(), lids + lidsoffset[idx], lids + lidsoffset[idx] + lidslen[idx]);
    auto nsamples = 2 * bmap.size();
    auto numpoints = lidslen[idx];
    auto clids = lids + lidsoffset[idx];
    nsamples = (nsamples < 2 * m) ? 2 * m : nsamples;
    int slen = sidlen[idx];
    int offset = sidoffset[idx];
    // add sampling points
    std::vector<int> amap;
    mt19937 generator(idx); // deterministic per-node seed
    uniform_int_distribution<> uniform_distribution(0, n - 1);
    if (nsamples < (n - numpoints)) {
        amap.assign(sid + offset, sid + offset + slen);
        // Top up with random points not already sampled and not in this node.
        while (amap.size() < nsamples) {
            auto sample = uniform_distribution(generator);
            if (std::find(amap.begin(), amap.end(), sample) == amap.end() &&
                std::find(clids, clids + numpoints, sample) == (clids + numpoints)) {
                amap.push_back(sample);
            }
        }
    } else {
        for (int sample = 0; sample < n; sample++) // TODO: may can be improved here
        {
            if (std::find(amap.begin(), amap.end(), sample) == amap.end()) {
                amap.push_back(sample);
            }
        }
    }
    auto Kab = (double *) malloc(sizeof(double) * amap.size() * bmap.size());
    memset(Kab, 0, sizeof(double) * amap.size() * bmap.size());
    Fsubmatrix(amap, bmap, Kab, X, n, dim, ktype, h);
    auto m1 = amap.size();
    auto n1 = bmap.size();
    auto tolerance = acc;
    int *skels;
    double *proj;
    int *jpvt;
    int s = decomposition(Kab, m1, n1, tolerance, &skels, &proj, &jpvt);
    // Map local skeleton column indices back to global point ids.
    for (int i = 0; i < s; ++i) {
        skels[i] = bmap[skels[i]];
    }
    free(jpvt); // Fix: pivot array from decomposition() was leaked.
    rtmp[idx].skels = skels;
    rtmp[idx].skels_length = s;
    rtmp[idx].proj = proj;
    rtmp[idx].proj_column = (int) bmap.size();
    free(Kab);
}
// Skeletonize a leaf node using neighbor-based sampling (BuildNeighBorsLeaf)
// rather than precomputed sample ids.
void skeletonize_leaf(int idx, HTree &tree, ret *rtmp, int m, int n, double *X, Internal::Ktype ktype, int dim,
                      double h, double acc) {
    auto lids = tree.lids;
    auto lidslen = tree.lidslen;
    auto lidsoffset = tree.lidsoffset;
    std::vector<int> bmap;
    bmap.insert(bmap.end(), lids + lidsoffset[idx], lids + lidsoffset[idx] + lidslen[idx]);
    auto nsamples = 2 * bmap.size();
    auto numpoints = lidslen[idx];
    auto clids = lids + lidsoffset[idx];
    nsamples = (nsamples < 2 * m) ? 2 * m : nsamples;
    BuildNeighBorsLeaf(tree, idx, nsamples);
    auto &snids = tree.snids[idx];
    auto &pnids = tree.pnids[idx];
    // Order sampling candidates by distance (closest first).
    std::multimap<double, int> ordered_snids = flip_map(snids);
    // add sampling points
    std::vector<int> amap;
    if (nsamples < (n - numpoints)) {
        amap.reserve(nsamples);
        for (auto cur = ordered_snids.begin(); cur != ordered_snids.end(); cur++) {
            amap.push_back(cur->second);
        }
        // Top up with random points not already sampled and not in this node.
        while (amap.size() < nsamples) {
            auto sample = rand() % n;
            if (std::find(amap.begin(), amap.end(), sample) == amap.end() &&
                std::find(clids, clids + numpoints, sample) == (clids + numpoints)) {
                amap.push_back(sample);
            }
        }
    } else {
        for (int sample = 0; sample < n; sample++) // TODO: may can be improved here
        {
            if (std::find(amap.begin(), amap.end(), sample) == amap.end()) {
                amap.push_back(sample);
            }
        }
    }
    auto Kab = (double *) malloc(sizeof(double) * amap.size() * bmap.size());
    memset(Kab, 0, sizeof(double) * amap.size() * bmap.size());
    Fsubmatrix(amap, bmap, Kab, X, n, dim, ktype, h);
    auto m1 = amap.size();
    auto n1 = bmap.size();
    auto tolerance = acc;
    int *skels;
    double *proj;
    int *jpvt;
    int s = decomposition(Kab, m1, n1, tolerance, &skels, &proj, &jpvt);
    // Map local skeleton column indices back to global point ids.
    for (int i = 0; i < s; ++i) {
        skels[i] = bmap[skels[i]];
    }
    free(jpvt); // Fix: pivot array from decomposition() was leaked.
    // Rebuild the pruning-neighbor set from the selected skeleton points
    // (nearest half of the 32 stored neighbors of each skeleton point).
    pnids.clear();
    auto &NN = tree.NN;
    for (int ii = 0; ii < s; ii++) {
        for (int jj = 0; jj < 32 / 2; jj++) {
            pnids.insert(NN.data()[skels[ii] * 32 + jj].second);
        }
    }
    rtmp[idx].skels = skels;
    rtmp[idx].skels_length = s;
    rtmp[idx].proj = proj;
    rtmp[idx].proj_column = (int) bmap.size();
    free(Kab);
}
// Skeletonize an internal node: candidate columns (bmap) are the union of
// the children's skeleton points; sampling rows come from the merged
// neighbor sets (BuildNeighBorsInternal) topped up with random points.
void skeletonize_internal(int idx, HTree &tree, ret *rtmp, int m, int n, double *X, Internal::Ktype ktype, int dim,
                          double h, double acc) {
    auto lc = tree.tlchildren;
    auto rc = tree.trchildren;
    auto lids = tree.lids;
    auto lidslen = tree.lidslen;
    auto lidsoffset = tree.lidsoffset;
    std::vector<int> bmap;
    auto v = lc[idx];
    bmap.insert(bmap.end(), rtmp[v].skels, rtmp[v].skels + rtmp[v].skels_length);
    v = rc[idx];
    bmap.insert(bmap.end(), rtmp[v].skels, rtmp[v].skels + rtmp[v].skels_length);
    auto nsamples = 2 * bmap.size();
    auto numpoints = lidslen[idx];
    auto clids = lids + lidsoffset[idx];
    nsamples = (nsamples < 2 * m) ? 2 * m : nsamples;
    BuildNeighBorsInternal(tree, idx, nsamples);
    auto &snids = tree.snids[idx];
    auto &pnids = tree.pnids[idx];
    // Order sampling candidates by distance (closest first).
    std::multimap<double, int> ordered_snids = flip_map(snids);
    std::vector<int> amap;
    if (nsamples < (n - numpoints)) {
        amap.reserve(nsamples);
        // Take at most nsamples of the closest neighbor candidates.
        if (ordered_snids.size() > nsamples) {
            auto cur = ordered_snids.begin();
            for (int k = 0; k < nsamples; k++) {
                amap.push_back(cur->second);
                cur++;
            }
        } else {
            for (auto cur = ordered_snids.begin(); cur != ordered_snids.end(); cur++) {
                amap.push_back(cur->second);
            }
        }
        // Top up with random points not already sampled and not in this node.
        while (amap.size() < nsamples) {
            auto sample = rand() % n;
            if (std::find(amap.begin(), amap.end(), sample) == amap.end() &&
                std::find(clids, clids + numpoints, sample) == (clids + numpoints)) {
                amap.push_back(sample);
            }
        }
    } else {
        for (int sample = 0; sample < n; sample++) // TODO: may can be improved here
        {
            if (std::find(amap.begin(), amap.end(), sample) == amap.end()) {
                amap.push_back(sample);
            }
        }
    }
    auto Kab = (double *) malloc(sizeof(double) * amap.size() * bmap.size());
    memset(Kab, 0, sizeof(double) * amap.size() * bmap.size());
    Fsubmatrix(amap, bmap, Kab, X, n, dim, ktype, h);
    auto m1 = amap.size();
    auto n1 = bmap.size();
    auto tolerance = acc;
    int *skels;
    double *proj;
    int *jpvt;
    int s = decomposition(Kab, m1, n1, tolerance, &skels, &proj, &jpvt);
    // Map local skeleton column indices back to global point ids.
    for (int i = 0; i < s; ++i) {
        skels[i] = bmap[skels[i]];
    }
    free(jpvt); // Fix: pivot array from decomposition() was leaked.
    // Rebuild the pruning-neighbor set from the selected skeleton points.
    pnids.clear();
    auto &NN = tree.NN;
    for (int ii = 0; ii < s; ii++) {
        for (int jj = 0; jj < 32 / 2; jj++) {
            pnids.insert(NN.data()[skels[ii] * 32 + jj].second);
        }
    }
    rtmp[idx].skels = skels;
    rtmp[idx].skels_length = s;
    rtmp[idx].proj = proj;
    rtmp[idx].proj_column = (int) bmap.size();
    free(Kab); // Fix: was commented out, leaking the kernel submatrix.
}
// Skeletonize an internal node using precomputed sample ids (sid arrays):
// candidate columns are the union of the children's skeleton points.
void skeletonize_internal(int idx, int *lids, int *lidslen, int *lidsoffset, ret *rtmp, int *sid, int *sidlen,
                          int *sidoffset, int *lc, int *rc, int m, int n, double *X, Internal::Ktype ktype, int dim,
                          double h, double acc) {
    std::vector<int> bmap;
    auto v = lc[idx];
    bmap.insert(bmap.end(), rtmp[v].skels, rtmp[v].skels + rtmp[v].skels_length);
    v = rc[idx];
    bmap.insert(bmap.end(), rtmp[v].skels, rtmp[v].skels + rtmp[v].skels_length);
    auto nsamples = 2 * bmap.size();
    auto numpoints = lidslen[idx];
    auto clids = lids + lidsoffset[idx];
    nsamples = (nsamples < 2 * m) ? 2 * m : nsamples;
    int slen = sidlen[idx];
    int offset = sidoffset[idx];
    std::vector<int> amap;
    mt19937 generator(idx); // deterministic per-node seed
    uniform_int_distribution<> uniform_distribution(0, n - 1);
    if (nsamples < (n - numpoints)) {
        amap.assign(sid + offset, sid + offset + slen);
        // Top up with random points not already sampled and not in this node.
        while (amap.size() < nsamples) {
            auto sample = uniform_distribution(generator);
            if (std::find(amap.begin(), amap.end(), sample) == amap.end() &&
                std::find(clids, clids + numpoints, sample) == (clids + numpoints)) {
                amap.push_back(sample);
            }
        }
    } else {
        for (int sample = 0; sample < n; sample++) // TODO: may can be improved here
        {
            if (std::find(amap.begin(), amap.end(), sample) == amap.end()) {
                amap.push_back(sample);
            }
        }
    }
    auto Kab = (double *) malloc(sizeof(double) * amap.size() * bmap.size());
    memset(Kab, 0, sizeof(double) * amap.size() * bmap.size());
    Fsubmatrix(amap, bmap, Kab, X, n, dim, ktype, h);
    auto N = n;
    auto m1 = amap.size();
    auto n1 = bmap.size();
    auto q = numpoints;
    // Scale the tolerance by the sampled fraction of the full kernel matrix.
    auto tolerance = std::sqrt((m1 * n1 * 1.0) / (1.0 * N * (N - q))) * acc;
    int *skels;
    double *proj;
    int *jpvt;
    int s = decomposition(Kab, m1, n1, tolerance, &skels, &proj, &jpvt);
    // Map local skeleton column indices back to global point ids.
    for (int i = 0; i < s; ++i) {
        skels[i] = bmap[skels[i]];
    }
    free(jpvt); // Fix: pivot array from decomposition() was leaked.
    rtmp[idx].skels = skels;
    rtmp[idx].skels_length = s;
    rtmp[idx].proj = proj;
    rtmp[idx].proj_column = (int) bmap.size();
    free(Kab);
}
// Generic skeletonization (leaf or internal node) using precomputed sample
// ids and a fixed base error of 1e-5 scaled by the sampled fraction.
void skeletonize(int idx, int *lids, int *lidslen, int *lidsoffset, ret *rtmp, int *sid, int *sidlen,
                 int *sidoffset, int *lc, int *rc, int m, int n, double *X, Internal::Ktype ktype, int dim, double h) {
    std::vector<int> bmap;
    if (lc[idx] == -1) { // leaf node: candidates are the node's own points
        bmap.insert(bmap.end(), lids + lidsoffset[idx], lids + lidsoffset[idx] + lidslen[idx]);
    } else { // internal node: union of the children's skeleton points
        auto v = lc[idx];
        bmap.insert(bmap.end(), rtmp[v].skels, rtmp[v].skels + rtmp[v].skels_length);
        v = rc[idx];
        bmap.insert(bmap.end(), rtmp[v].skels, rtmp[v].skels + rtmp[v].skels_length);
    }
    auto nsamples = 2 * bmap.size();
    auto numpoints = lidslen[idx];
    auto clids = lids + lidsoffset[idx];
    nsamples = (nsamples < 2 * m) ? 2 * m : nsamples;
    int slen = sidlen[idx];
    int offset = sidoffset[idx];
    // add sampling points
    std::vector<int> amap(sid + offset, sid + offset + slen);
    mt19937 generator(idx); // deterministic per-node seed
    uniform_int_distribution<> uniform_distribution(0, n - 1);
    if (nsamples < (n - numpoints)) {
        // Top up with random points not already sampled and not in this node.
        while (amap.size() < nsamples) {
            auto sample = uniform_distribution(generator);
            if (std::find(amap.begin(), amap.end(), sample) == amap.end() &&
                std::find(clids, clids + numpoints, sample) == (clids + numpoints)) {
                amap.push_back(sample);
            }
        }
    } else {
        for (int sample = 0; sample < n; sample++) // TODO: may can be improved here
        {
            if (std::find(amap.begin(), amap.end(), sample) == amap.end()) {
                amap.push_back(sample);
            }
        }
    }
    auto Kab = (double *) malloc(sizeof(double) * amap.size() * bmap.size());
    memset(Kab, 0, sizeof(double) * amap.size() * bmap.size());
    Fsubmatrix(amap, bmap, Kab, X, n, dim, ktype, h);
    auto N = n;
    auto m1 = amap.size();
    auto n1 = bmap.size();
    auto q = numpoints;
    double error = 1e-5;
    // Scale the tolerance by the sampled fraction of the full kernel matrix.
    auto tolerance = std::sqrt((m1 * n1 * 1.0) / (1.0 * N * (N - q))) * error;
    int *skels;
    double *proj;
    int *jpvt;
    int s = decomposition(Kab, m1, n1, tolerance, &skels, &proj, &jpvt);
    // Map local skeleton column indices back to global point ids.
    for (int i = 0; i < s; ++i) {
        skels[i] = bmap[skels[i]];
    }
    free(jpvt); // Fix: pivot array from decomposition() was leaked.
    rtmp[idx].skels = skels;
    rtmp[idx].skels_length = s;
    rtmp[idx].proj = proj;
    rtmp[idx].proj_column = (int) bmap.size();
    free(Kab);
}
void binpacking(std::vector<std::vector<int>> &wpartitions, std::vector<std::vector<int>> &owpartitions, int numofbins,
clustertree &ctree, HTree &tree, ret *rtmp) {
DDcost *ccost = new DDcost[wpartitions.size()];
for (auto i = 0; i < wpartitions.size(); i++) {
ccost[i].cost = 0;
ccost[i].index = i;
for (auto j = 0; j < wpartitions[i].size(); j++) {
auto idx = wpartitions[i][j];
unsigned long cost = 0;
if (tree.tlchildren[idx] == -1) {
cost += 2 * rtmp[idx].skels_length * rtmp[idx].proj_column;
// leafdim[leafmap.at(idx)] * setup.nrhs;
} else {
auto lc = tree.tlchildren[idx];
auto rc = tree.trchildren[idx];
cost += 2 * rtmp[idx].skels_length * rtmp[lc].skels_length;
cost += 2 * rtmp[idx].skels_length * rtmp[rc].skels_length;
//
// for(auto &v : children[idx])
// {
// cost += 2*tmpresult[idx].skels_length*tmpresult[v].skels_length*setup.nrhs;
// }
}
ccost[i].cost += cost;
}
}
std::sort(ccost, ccost + wpartitions.size(), fcompare);
uint64_t *ocost = new uint64_t[numofbins];
memset(ocost, 0, sizeof(uint64_t) * numofbins);
int partNo = wpartitions.size();
int minBin = 0;
for (int i = 0; i < partNo; i++) {
minBin = findMin(ocost, numofbins);
ocost[minBin] += ccost[i].cost;
int index = ccost[i].index;
//owpartition
owpartitions[minBin].insert(owpartitions[minBin].end(),
wpartitions[index].begin(), wpartitions[index].end());
}
}
// Rebalance each coarse level set: split its work lists into at most one
// bin per thread via binpacking, then rebuild the flattened clevelset /
// wpart / idx arrays.
void BalanceCoarLevelSet(clustertree &ctree, HTree &tree, ret *rtmp) {
    auto &postw = ctree.postw;
    auto &opostw = ctree.opostw;
    opostw.resize(postw.size());
    for (int i = 0; i < postw.size(); i++) {
        auto &lpow = postw[i];
        int nw = lpow.size();
        int nthreads = omp_get_max_threads();
        // One bin per thread when there is enough work; otherwise pair up.
        int nparts = (nw >= nthreads) ? nthreads : nw / 2;
        if (nparts == 0) nparts = 1;
        opostw[i].resize(nparts);
        binpacking(lpow, opostw[i], nparts, ctree, tree, rtmp);
    }
    // Flatten per-level offsets. (A dead `len = opostw.size()` that was
    // immediately overwritten has been removed.)
    size_t len = 0;
    int index = 0;
    for (auto &v : postw) {
        tree.clevelset[index++] = len;
        len += v.size();
    }
    tree.clevelset[index] = len;
    // Flatten per-partition offsets and the node ids themselves.
    len = 0;
    index = 0;
    int tidx = 0;
    for (auto &v : postw) {
        for (auto &w : v) {
            tree.wpart[index++] = len;
            len += w.size();
            for (auto &t : w) {
                tree.idx[tidx++] = t;
            }
        }
    }
    tree.wpart[index] = len;
}
// Skeletonize the whole tree bottom-up — leaves first, then every internal
// level — and finally rebalance the coarse level sets by estimated cost.
void compression(clustertree &ctree, HTree &tree, ret *rtmp, double *X, int m, int n, Internal::Ktype ktype, int dim, double h,
                 double acc = 1e-5) {
    const int leafLevel = tree.depth - 1;
#pragma omp parallel for
    for (int j = tree.levelset[leafLevel]; j < tree.levelset[leafLevel + 1]; j++) {
        skeletonize_leaf(tree.idx[j], tree, rtmp, m, n, X, ktype, dim, h, acc);
    }
    // Each level depends on the skeletons of the level below it.
    for (int level = tree.depth - 2; level >= 0; level--) {
#pragma omp parallel for
        for (int j = tree.levelset[level]; j < tree.levelset[level + 1]; j++) {
            skeletonize_internal(tree.idx[j], tree, rtmp, m, n, X, ktype, dim, h, acc);
        }
    }
    BalanceCoarLevelSet(ctree, tree, rtmp);
}
// Estimate the flop count of one evaluation pass for nrhs right-hand sides:
// tree transfer operators plus near (dense) and far (compressed) blocks.
// Prints a breakdown and returns the total.
unsigned long computeFlops(HTree &tree, ret *rtmp, int m, int nrhs)
{
    unsigned long flops = 0;
    unsigned long tflops = 0;
    // Leaf level: s x dim projection per node.
    for (int j = tree.levelset[tree.depth - 1]; j < tree.levelset[tree.depth]; j++) {
        auto id = tree.idx[j];
        flops += 2 * rtmp[id].skels_length * nrhs * tree.Dim[tree.lm[id]];
    }
    // Internal levels: combine both children's skeletons.
    for (int lvl = tree.depth - 2; lvl > -1; lvl--) {
        for (int j = tree.levelset[lvl]; j < tree.levelset[lvl + 1]; j++) {
            auto id = tree.idx[j];
            auto lc = tree.tlchildren[id];
            auto rc = tree.trchildren[id];
            flops += 2 * rtmp[id].skels_length * (rtmp[lc].skels_length + rtmp[rc].skels_length) * nrhs;
        }
    }
    flops = 2 * flops; // overall doubling factor kept from original estimate
    tflops += flops;
    printf("tree flops is %lu\n", tflops);
    // Near-field (dense) interaction blocks.
    for (int k = 0; k < tree.ncount; k++) {
        auto dimx = tree.Dim[tree.nxval[k]];
        auto dimy = tree.Dim[tree.nyval[k]];
        flops += 2 * dimx * dimy * nrhs;
    }
    printf("number of near nodes is %d\n", tree.ncount);
    // Far-field (skeleton x skeleton) interaction blocks.
    for (int k = 0; k < tree.fcount; k++) {
        auto dimx = rtmp[tree.fxval[k]].skels_length;
        auto dimy = rtmp[tree.fyval[k]].skels_length;
        flops += 2 * dimx * dimy * nrhs;
    }
    printf("number of far nodes is %d\n", tree.fcount);
    printf("interaction flops is %lu\n", flops - tflops);
    return flops;
}
// Average skeleton rank over all tree nodes (leaves plus internal nodes).
// `m` and `nrhs` are unused but kept for signature compatibility.
unsigned long computeRanks(HTree &tree, ret *rtmp, int m, int nrhs)
{
    unsigned long rank = 0;
    int count = 0;
    // Leaf level.
    for (int j = tree.levelset[tree.depth - 1]; j < tree.levelset[tree.depth]; j++) {
        rank += rtmp[tree.idx[j]].skels_length;
        ++count;
    }
    // Internal levels.
    for (int i = tree.depth - 2; i > -1; i--) {
        for (int j = tree.levelset[i]; j < tree.levelset[i + 1]; j++) {
            rank += rtmp[tree.idx[j]].skels_length;
            ++count;
        }
    }
    // Fix: guard against division by zero on an empty tree.
    if (count == 0) return 0;
    return rank / count;
}
void
coarcompression(HTree &tree, ret *rtmp, double *X, int m, int n, Internal::Ktype ktype, int dim, int h, double acc) {
    // Skeletonize the coarse tree level by level. `clevelset` delimits coarse
    // levels; `wpart` delimits groups of nodes inside a level, and each group
    // is handled sequentially inside one parallel iteration.
    // First coarse level: may contain both leaves and internal nodes, so
    // dispatch on tlchildren == -1 (leaf marker).
#pragma omp parallel for
    for (int w = tree.clevelset[0]; w < tree.clevelset[1]; w++) {
        for (int pos = tree.wpart[w]; pos < tree.wpart[w + 1]; pos++) {
            auto node = tree.cidx[pos];
            if (tree.tlchildren[node] == -1) {
                skeletonize_leaf(node, tree.lids, tree.lidslen, tree.lidsoffset, rtmp, tree.sids, tree.sidlen,
                                 tree.sidoffset, tree.tlchildren,
                                 tree.trchildren, m, n, X, ktype, dim, h, acc);
            } else {
                skeletonize_internal(node, tree.lids, tree.lidslen, tree.lidsoffset, rtmp, tree.sids, tree.sidlen,
                                     tree.sidoffset, tree.tlchildren,
                                     tree.trchildren, m, n, X, ktype, dim, h, acc);
            }
        }
    }
    // Remaining coarse levels hold internal nodes only.
    for (int lvl = 1; lvl < tree.cdepth; lvl++) {
#pragma omp parallel for
        for (int w = tree.clevelset[lvl]; w < tree.clevelset[lvl + 1]; w++) {
            for (int pos = tree.wpart[w]; pos < tree.wpart[w + 1]; pos++) {
                auto node = tree.cidx[pos];
                skeletonize_internal(node, tree.lids, tree.lidslen, tree.lidsoffset, rtmp, tree.sids, tree.sidlen,
                                     tree.sidoffset, tree.tlchildren,
                                     tree.trchildren, m, n, X, ktype, dim, h, acc);
            }
        }
    }
}
#endif //PROJECT_COMPRESSION_H
|
convolution_7x7_pack1ton.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv7x7s2_pack1ton_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1(bias + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
const float* r6 = img0.row(6);
const float* kptr = kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 7 < outw; j += 8)
{
vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl);
vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl);
vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl);
vfloat32m1_t _sum4 = vle32_v_f32m1(outptr0 + packn * 4, vl);
vfloat32m1_t _sum5 = vle32_v_f32m1(outptr0 + packn * 5, vl);
vfloat32m1_t _sum6 = vle32_v_f32m1(outptr0 + packn * 6, vl);
vfloat32m1_t _sum7 = vle32_v_f32m1(outptr0 + packn * 7, vl);
vfloat32m1_t _k00 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k03 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k04 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k05 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k06 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k00, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[6], _k00, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[8], _k00, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[10], _k00, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[12], _k00, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[14], _k00, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[5], _k01, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[7], _k01, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[9], _k01, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[11], _k01, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[13], _k01, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[15], _k01, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[4], _k02, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[6], _k02, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[8], _k02, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[10], _k02, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[12], _k02, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[14], _k02, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[16], _k02, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[3], _k03, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[5], _k03, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[7], _k03, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[9], _k03, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[11], _k03, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[13], _k03, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[15], _k03, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[17], _k03, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[6], _k04, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[8], _k04, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[10], _k04, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[12], _k04, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[14], _k04, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[16], _k04, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[18], _k04, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[5], _k05, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[7], _k05, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[9], _k05, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[11], _k05, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[13], _k05, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[15], _k05, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[17], _k05, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[19], _k05, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[6], _k06, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[8], _k06, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[10], _k06, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[12], _k06, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r0[14], _k06, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r0[16], _k06, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r0[18], _k06, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r0[20], _k06, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k13 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k14 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k15 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k16 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k10, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[6], _k10, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[8], _k10, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[10], _k10, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[12], _k10, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[14], _k10, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[5], _k11, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[7], _k11, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[9], _k11, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[11], _k11, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[13], _k11, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[15], _k11, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[6], _k12, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[8], _k12, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[10], _k12, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[12], _k12, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[14], _k12, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[16], _k12, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[3], _k13, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[5], _k13, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[7], _k13, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[9], _k13, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[11], _k13, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[13], _k13, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[15], _k13, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[17], _k13, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[4], _k14, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[6], _k14, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[8], _k14, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[10], _k14, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[12], _k14, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[14], _k14, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[16], _k14, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[18], _k14, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[5], _k15, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[7], _k15, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[9], _k15, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[11], _k15, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[13], _k15, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[15], _k15, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[17], _k15, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[19], _k15, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[6], _k16, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[8], _k16, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[10], _k16, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[12], _k16, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r1[14], _k16, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r1[16], _k16, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r1[18], _k16, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r1[20], _k16, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k23 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k24 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k25 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k26 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k20, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[6], _k20, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[8], _k20, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[10], _k20, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[12], _k20, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[14], _k20, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[5], _k21, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[7], _k21, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[9], _k21, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[11], _k21, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[13], _k21, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[15], _k21, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[6], _k22, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[8], _k22, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[10], _k22, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[12], _k22, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[14], _k22, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[16], _k22, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[3], _k23, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[5], _k23, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[7], _k23, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[9], _k23, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[11], _k23, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[13], _k23, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[15], _k23, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[17], _k23, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[4], _k24, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[6], _k24, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[8], _k24, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[10], _k24, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[12], _k24, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[14], _k24, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[16], _k24, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[18], _k24, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[5], _k25, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[7], _k25, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[9], _k25, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[11], _k25, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[13], _k25, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[15], _k25, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[17], _k25, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[19], _k25, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[6], _k26, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[8], _k26, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[10], _k26, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[12], _k26, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r2[14], _k26, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r2[16], _k26, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r2[18], _k26, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r2[20], _k26, vl);
vfloat32m1_t _k30 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k31 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k32 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k33 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k34 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k35 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k36 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r3[0], _k30, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[2], _k30, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[4], _k30, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[6], _k30, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[8], _k30, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[10], _k30, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[12], _k30, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[14], _k30, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[1], _k31, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[3], _k31, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[5], _k31, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[7], _k31, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[9], _k31, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[11], _k31, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[13], _k31, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[15], _k31, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[2], _k32, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[4], _k32, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[6], _k32, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[8], _k32, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[10], _k32, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[12], _k32, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[14], _k32, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[16], _k32, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[3], _k33, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[5], _k33, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[7], _k33, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[9], _k33, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[11], _k33, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[13], _k33, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[15], _k33, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[17], _k33, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[4], _k34, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[6], _k34, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[8], _k34, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[10], _k34, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[12], _k34, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[14], _k34, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[16], _k34, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[18], _k34, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[5], _k35, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[7], _k35, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[9], _k35, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[11], _k35, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[13], _k35, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[15], _k35, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[17], _k35, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[19], _k35, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[6], _k36, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[8], _k36, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[10], _k36, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[12], _k36, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r3[14], _k36, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r3[16], _k36, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r3[18], _k36, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r3[20], _k36, vl);
vfloat32m1_t _k40 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k41 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k42 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k43 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k44 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k45 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k46 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r4[0], _k40, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[2], _k40, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[4], _k40, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[6], _k40, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[8], _k40, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[10], _k40, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[12], _k40, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[14], _k40, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[1], _k41, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[3], _k41, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[5], _k41, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[7], _k41, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[9], _k41, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[11], _k41, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[13], _k41, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[15], _k41, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[2], _k42, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[4], _k42, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[6], _k42, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[8], _k42, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[10], _k42, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[12], _k42, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[14], _k42, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[16], _k42, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[3], _k43, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[5], _k43, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[7], _k43, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[9], _k43, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[11], _k43, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[13], _k43, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[15], _k43, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[17], _k43, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[4], _k44, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[6], _k44, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[8], _k44, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[10], _k44, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[12], _k44, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[14], _k44, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[16], _k44, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[18], _k44, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[5], _k45, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[7], _k45, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[9], _k45, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[11], _k45, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[13], _k45, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[15], _k45, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[17], _k45, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[19], _k45, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[6], _k46, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[8], _k46, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[10], _k46, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[12], _k46, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r4[14], _k46, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r4[16], _k46, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r4[18], _k46, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r4[20], _k46, vl);
vfloat32m1_t _k50 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k51 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k52 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k53 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k54 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k55 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k56 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r5[0], _k50, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[2], _k50, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[4], _k50, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[6], _k50, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[8], _k50, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[10], _k50, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[12], _k50, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[14], _k50, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[1], _k51, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[3], _k51, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[5], _k51, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[7], _k51, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[9], _k51, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[11], _k51, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[13], _k51, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[15], _k51, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[2], _k52, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[4], _k52, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[6], _k52, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[8], _k52, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[10], _k52, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[12], _k52, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[14], _k52, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[16], _k52, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[3], _k53, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[5], _k53, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[7], _k53, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[9], _k53, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[11], _k53, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[13], _k53, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[15], _k53, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[17], _k53, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[4], _k54, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[6], _k54, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[8], _k54, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[10], _k54, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[12], _k54, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[14], _k54, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[16], _k54, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[18], _k54, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[5], _k55, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[7], _k55, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[9], _k55, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[11], _k55, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[13], _k55, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[15], _k55, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[17], _k55, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[19], _k55, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[6], _k56, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[8], _k56, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[10], _k56, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[12], _k56, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r5[14], _k56, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r5[16], _k56, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r5[18], _k56, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r5[20], _k56, vl);
vfloat32m1_t _k60 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k61 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k62 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k63 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k64 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k65 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k66 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr -= packn * 42;
_sum0 = vfmacc_vf_f32m1(_sum0, r6[0], _k60, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[2], _k60, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[4], _k60, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[6], _k60, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[8], _k60, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[10], _k60, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[12], _k60, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[14], _k60, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[1], _k61, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[3], _k61, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[5], _k61, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[7], _k61, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[9], _k61, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[11], _k61, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[13], _k61, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[15], _k61, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[2], _k62, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[4], _k62, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[6], _k62, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[8], _k62, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[10], _k62, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[12], _k62, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[14], _k62, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[16], _k62, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[3], _k63, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[5], _k63, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[7], _k63, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[9], _k63, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[11], _k63, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[13], _k63, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[15], _k63, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[17], _k63, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[4], _k64, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[6], _k64, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[8], _k64, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[10], _k64, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[12], _k64, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[14], _k64, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[16], _k64, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[18], _k64, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[5], _k65, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[7], _k65, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[9], _k65, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[11], _k65, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[13], _k65, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[15], _k65, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[17], _k65, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[19], _k65, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[6], _k66, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[8], _k66, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[10], _k66, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[12], _k66, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, r6[14], _k66, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, r6[16], _k66, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, r6[18], _k66, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, r6[20], _k66, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr0 + packn, _sum1, vl);
vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl);
vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl);
vse32_v_f32m1(outptr0 + packn * 4, _sum4, vl);
vse32_v_f32m1(outptr0 + packn * 5, _sum5, vl);
vse32_v_f32m1(outptr0 + packn * 6, _sum6, vl);
vse32_v_f32m1(outptr0 + packn * 7, _sum7, vl);
outptr0 += packn * 8;
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
r5 += 16;
r6 += 16;
}
for (; j + 3 < outw; j += 4)
{
vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(outptr0 + packn, vl);
vfloat32m1_t _sum2 = vle32_v_f32m1(outptr0 + packn * 2, vl);
vfloat32m1_t _sum3 = vle32_v_f32m1(outptr0 + packn * 3, vl);
vfloat32m1_t _k00 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k03 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k04 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k05 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k06 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[2], _k00, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[4], _k00, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[6], _k00, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[3], _k01, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[5], _k01, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[7], _k01, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[4], _k02, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[6], _k02, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[8], _k02, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[3], _k03, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[5], _k03, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[7], _k03, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[9], _k03, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[6], _k04, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[8], _k04, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[10], _k04, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[5], _k05, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[7], _k05, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[9], _k05, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[11], _k05, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[6], _k06, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r0[8], _k06, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r0[10], _k06, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r0[12], _k06, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k13 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k14 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k15 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k16 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[2], _k10, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[4], _k10, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[6], _k10, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[3], _k11, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[5], _k11, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[7], _k11, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[4], _k12, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[6], _k12, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[8], _k12, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[3], _k13, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[5], _k13, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[7], _k13, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[9], _k13, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[4], _k14, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[6], _k14, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[8], _k14, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[10], _k14, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[5], _k15, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[7], _k15, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[9], _k15, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[11], _k15, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[6], _k16, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r1[8], _k16, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r1[10], _k16, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r1[12], _k16, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k23 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k24 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k25 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k26 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[2], _k20, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[4], _k20, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[6], _k20, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[3], _k21, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[5], _k21, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[7], _k21, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[4], _k22, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[6], _k22, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[8], _k22, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[3], _k23, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[5], _k23, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[7], _k23, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[9], _k23, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[4], _k24, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[6], _k24, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[8], _k24, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[10], _k24, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[5], _k25, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[7], _k25, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[9], _k25, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[11], _k25, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[6], _k26, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r2[8], _k26, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r2[10], _k26, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r2[12], _k26, vl);
vfloat32m1_t _k30 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k31 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k32 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k33 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k34 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k35 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k36 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r3[0], _k30, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[2], _k30, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[4], _k30, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[6], _k30, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[1], _k31, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[3], _k31, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[5], _k31, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[7], _k31, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[2], _k32, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[4], _k32, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[6], _k32, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[8], _k32, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[3], _k33, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[5], _k33, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[7], _k33, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[9], _k33, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[4], _k34, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[6], _k34, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[8], _k34, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[10], _k34, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[5], _k35, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[7], _k35, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[9], _k35, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[11], _k35, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[6], _k36, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r3[8], _k36, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r3[10], _k36, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r3[12], _k36, vl);
vfloat32m1_t _k40 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k41 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k42 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k43 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k44 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k45 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k46 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r4[0], _k40, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[2], _k40, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[4], _k40, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[6], _k40, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[1], _k41, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[3], _k41, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[5], _k41, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[7], _k41, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[2], _k42, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[4], _k42, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[6], _k42, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[8], _k42, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[3], _k43, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[5], _k43, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[7], _k43, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[9], _k43, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[4], _k44, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[6], _k44, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[8], _k44, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[10], _k44, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[5], _k45, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[7], _k45, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[9], _k45, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[11], _k45, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[6], _k46, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r4[8], _k46, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r4[10], _k46, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r4[12], _k46, vl);
vfloat32m1_t _k50 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k51 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k52 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k53 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k54 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k55 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k56 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r5[0], _k50, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[2], _k50, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[4], _k50, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[6], _k50, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[1], _k51, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[3], _k51, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[5], _k51, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[7], _k51, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[2], _k52, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[4], _k52, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[6], _k52, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[8], _k52, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[3], _k53, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[5], _k53, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[7], _k53, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[9], _k53, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[4], _k54, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[6], _k54, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[8], _k54, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[10], _k54, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[5], _k55, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[7], _k55, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[9], _k55, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[11], _k55, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[6], _k56, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r5[8], _k56, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r5[10], _k56, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r5[12], _k56, vl);
vfloat32m1_t _k60 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k61 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k62 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k63 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k64 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k65 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k66 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr -= packn * 42;
_sum0 = vfmacc_vf_f32m1(_sum0, r6[0], _k60, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[2], _k60, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[4], _k60, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[6], _k60, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[1], _k61, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[3], _k61, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[5], _k61, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[7], _k61, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[2], _k62, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[4], _k62, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[6], _k62, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[8], _k62, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[3], _k63, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[5], _k63, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[7], _k63, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[9], _k63, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[4], _k64, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[6], _k64, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[8], _k64, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[10], _k64, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[5], _k65, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[7], _k65, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[9], _k65, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[11], _k65, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[6], _k66, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, r6[8], _k66, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, r6[10], _k66, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, r6[12], _k66, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr0 + packn, _sum1, vl);
vse32_v_f32m1(outptr0 + packn * 2, _sum2, vl);
vse32_v_f32m1(outptr0 + packn * 3, _sum3, vl);
outptr0 += packn * 4;
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
}
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = vle32_v_f32m1(outptr0, vl);
vfloat32m1_t _k00 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k03 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k04 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k05 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k06 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r0[0], _k00, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[1], _k01, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[2], _k02, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[3], _k03, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[4], _k04, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[5], _k05, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r0[6], _k06, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k13 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k14 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k15 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k16 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r1[0], _k10, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[1], _k11, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[2], _k12, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[3], _k13, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[4], _k14, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[5], _k15, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r1[6], _k16, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k23 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k24 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k25 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k26 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r2[0], _k20, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[1], _k21, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[2], _k22, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[3], _k23, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[4], _k24, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[5], _k25, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r2[6], _k26, vl);
vfloat32m1_t _k30 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k31 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k32 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k33 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k34 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k35 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k36 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r3[0], _k30, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[1], _k31, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[2], _k32, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[3], _k33, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[4], _k34, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[5], _k35, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r3[6], _k36, vl);
vfloat32m1_t _k40 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k41 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k42 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k43 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k44 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k45 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k46 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r4[0], _k40, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[1], _k41, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[2], _k42, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[3], _k43, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[4], _k44, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[5], _k45, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r4[6], _k46, vl);
vfloat32m1_t _k50 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k51 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k52 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k53 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k54 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k55 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k56 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr += packn * 7;
_sum0 = vfmacc_vf_f32m1(_sum0, r5[0], _k50, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[1], _k51, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[2], _k52, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[3], _k53, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[4], _k54, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[5], _k55, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r5[6], _k56, vl);
vfloat32m1_t _k60 = vle32_v_f32m1(kptr, vl);
vfloat32m1_t _k61 = vle32_v_f32m1(kptr + packn, vl);
vfloat32m1_t _k62 = vle32_v_f32m1(kptr + packn * 2, vl);
vfloat32m1_t _k63 = vle32_v_f32m1(kptr + packn * 3, vl);
vfloat32m1_t _k64 = vle32_v_f32m1(kptr + packn * 4, vl);
vfloat32m1_t _k65 = vle32_v_f32m1(kptr + packn * 5, vl);
vfloat32m1_t _k66 = vle32_v_f32m1(kptr + packn * 6, vl);
kptr -= packn * 42;
_sum0 = vfmacc_vf_f32m1(_sum0, r6[0], _k60, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[1], _k61, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[2], _k62, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[3], _k63, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[4], _k64, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[5], _k65, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, r6[6], _k66, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
outptr0 += packn;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
ast-dump-openmp-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // Baseline: a single canonical for-loop under "#pragma omp distribute". NOTE(review): the CHECK lines below match exact line:column source locations, so comments here must stay trailing (same line) — never insert new lines.
#pragma omp distribute
  for (int i = 0; i < x; i++)
    ; // empty loop body (NullStmt in the AST dump)
}
void test_two(int x, int y) { // Two nested loops with no collapse clause; CHECK lines pin exact line:column positions — trailing comments only.
#pragma omp distribute
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++) // inner 'i' deliberately shadows the outer 'i'
      ;
}
void test_three(int x, int y) { // Same loop nest as test_two but with an explicit collapse(1), so an OMPCollapseClause node appears in the dump; trailing comments only (CHECK lines encode source columns).
#pragma omp distribute collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++) // inner 'i' shadows the outer 'i'
      ;
}
void test_four(int x, int y) { // collapse(2) over a two-deep loop nest; trailing comments only (CHECK lines encode source line:column info).
#pragma omp distribute collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++) // inner 'i' shadows the outer 'i'
      ;
}
void test_five(int x, int y, int z) { // collapse(2) over a three-deep nest: only two loops are collapsed, the z-loop remains an ordinary statement; trailing comments only (CHECK lines encode source positions).
#pragma omp distribute collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++) // each 'i' shadows the one above it
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:4:1, col:23>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:10:1, col:23>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:17:1, col:35>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:24:1, col:35>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeDirective {{.*}} <line:31:1, col:35>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:24, col:34>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:33> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:33> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
GB_binop__plus_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_int64
// A.*B function (eWiseMult): GB_AemultB__plus_int64
// A*D function (colscale): GB_AxD__plus_int64
// D*A function (rowscale): GB_DxB__plus_int64
// C+=B function (dense accum): GB_Cdense_accumB__plus_int64
// C+=b function (dense accum): GB_Cdense_accumb__plus_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_int64
// C=scalar+B GB_bind1st__plus_int64
// C=scalar+B' GB_bind1st_tran__plus_int64
// C=A+scalar GB_bind2nd__plus_int64
// C=A'+scalar GB_bind2nd_tran__plus_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij + bij)
// type of the entries of A
#define GB_ATYPE \
int64_t
// type of the entries of B (and of the scalar b)
#define GB_BTYPE \
int64_t
// type of the entries of C
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access the p-th entry of the C value array
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" means no CBLAS gateway exists for plus_int64)
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (set via GxB_NO_* compile-time controls; see GB_control.h)
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT64 || GxB_NO_PLUS_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Dense ewise3 kernel with accumulation: C(i,j) += A(i,j) + B(i,j).  The
// loop body is supplied by the included template, specialized through the
// GB_* macros defined above (GB_BINOP is z = (x + y)).  Unlike the other
// kernels in this file it returns void and has no GB_DISABLE guard.
void GB_Cdense_ewise3_accum__plus_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense ewise3 kernel without accumulation: C(i,j) = A(i,j) + B(i,j).
// The loop body is supplied by the included template, specialized through
// the GB_* macros above.  Returns GrB_NO_VALUE if this specialization is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_ewise3_noaccum__plus_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Accumulate the sparse matrix B into the dense matrix C with PLUS.  The
// kfirst_slice/klast_slice/pstart_slice arrays describe how B has been
// partitioned into ntasks parallel tasks -- presumably produced by the
// caller's slicing routine; verify against the template's expectations.
// The loop body is supplied by the included template.  Returns GrB_NO_VALUE
// if compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_accumB__plus_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulate the scalar b (passed as the untyped pointer p_bwork, which
// holds an int64_t) into every entry of the dense matrix C with PLUS.  The
// loop body is supplied by the included template, specialized through the
// GB_* macros above.  Returns GrB_NO_VALUE if this specialization is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
// Fix: removed an unreachable duplicate "return (GrB_SUCCESS) ;" that
// followed the brace block -- the return inside the block always executes.
GrB_Info GB_Cdense_accumb__plus_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scale the columns of A by the diagonal matrix D using PLUS as the
// multiply operator.  A_is_pattern / D_is_pattern flag whether the values
// of A or D are ignored (pattern-only) -- consumed by the included
// template.  The slice arrays partition the work into ntasks tasks.
// C->x is accessed directly as the typed output array Cx.  Returns
// GrB_NO_VALUE if compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_AxD__plus_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scale the rows of B by the diagonal matrix D using PLUS as the multiply
// operator.  D_is_pattern / B_is_pattern flag whether the values of D or B
// are ignored -- consumed by the included template.  C->x is accessed
// directly as the typed output array Cx.  Returns GrB_NO_VALUE if compiled
// out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_DxB__plus_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Element-wise add (set union of patterns) with the PLUS operator,
// optionally under mask M (Mask_struct: treat M as structural).  The
// Ch_is_Mh flag and the C_to_M / C_to_A / C_to_B mapping vectors plus the
// TaskList of ntasks tasks are consumed by the included GB_add_template.c
// -- presumably built by the caller's add-phase1 planner; verify there.
// Returns GrB_NO_VALUE if compiled out via GB_DISABLE, GrB_SUCCESS
// otherwise.
GrB_Info GB_AaddB__plus_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Element-wise multiply (set intersection of patterns) with the PLUS
// operator, optionally under mask M (Mask_struct: treat M as structural).
// The C_to_M / C_to_A / C_to_B mapping vectors and the TaskList of ntasks
// tasks are consumed by the included GB_emult_template.c.  Returns
// GrB_NO_VALUE if compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_AemultB__plus_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Compute Cx [k] = x + Bx [k] for k = 0..anz-1, with the scalar x bound to
// the first argument of PLUS.  All three GB_void pointers carry int64_t
// data; Cx and Bx may alias (safe here: each iteration touches a single
// index).  The loop is parallelized with a static OpenMP schedule over
// nthreads threads.  Returns GrB_NO_VALUE if this specialization is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_bind1st__plus_int64
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = (x + Bx [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Compute Cx [k] = Ax [k] + y for k = 0..anz-1, with the scalar y bound to
// the second argument of PLUS.  All three GB_void pointers carry int64_t
// data; Cx and Ax may alias (safe here: each iteration touches a single
// index).  The loop is parallelized with a static OpenMP schedule over
// nthreads threads.  Returns GrB_NO_VALUE if this specialization is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_bind2nd__plus_int64
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arguments
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = (Ax [k] + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// Transpose A and apply PLUS with the scalar x bound to the first argument:
// C = x + A'.  The transpose loops come from the included
// GB_unop_transpose.c template, which applies GB_CAST_OP (defined just
// above) to each entry.  Rowcounts, Iter, A_slice and naslice describe the
// caller-prepared slicing of A -- consumed by the template.
GrB_Info GB_bind1st_tran__plus_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code that follows (a no-op redefinition here,
// since the A type of this operator is also int64_t)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// Transpose A and apply PLUS with the scalar y bound to the second
// argument: C = A' + y.  The transpose loops come from the included
// GB_unop_transpose.c template, which applies GB_CAST_OP (defined just
// above) to each entry.  Rowcounts, Iter, A_slice and naslice describe the
// caller-prepared slicing of A -- consumed by the template.  No GB_ATYPE
// redefinition is needed here: A is already the first operand.
GrB_Info GB_bind2nd_tran__plus_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64. whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bit to represent a
/// value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue. Wether the ASTContext will cleanup the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 8;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 8;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
// Anonymous union holding the per-subclass bit-field layouts. Exactly one
// member is active for any given node, determined by the node's StmtClass.
// Every layout begins with unnamed bits covering the Stmt (and, for
// expressions, Expr) portion, so the leading class bits stay consistent
// across members.
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// GNU Extensions.
StmtExprBitfields StmtExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
// Convenience overload taking a pointer; forwards to the reference form.
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
// Placement new: the caller supplies the storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching placement deletes. Intentionally no-ops: Stmt nodes live in the
// ASTContext's arena and are never deallocated individually.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
/// Tag-only type: carries no data and exists purely to select the
/// "empty" constructor overloads.
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
// Dereferencing casts the stored Stmt* down to T*, tolerating null
// entries (cast_or_null returns null for a null input).
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
// Convenience aliases for the common Expr-typed child arrays.
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmt nodes are arena-allocated and referred to by pointer; they are
// never default-constructed, copied, or moved.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
// Primary constructor: records the dynamic class in the shared bit-field
// storage and, if enabled, updates the global per-class statistics.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
// Recover the dynamic class stored by the constructor above.
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
// Human-readable name of the dynamic class (for diagnostics/dumping).
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
// Const overload forwards to the non-const implementation above.
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
// Non-const overload forwards to the const implementation above.
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
// Const view over the same children; dispatches through the non-const
// children() (implemented per subclass).
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  /// Build a declaration statement covering the given declaration group.
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// Returns true if this DeclStmt refers to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  Decl *getSingleDecl() { return DG.getSingleDecl(); }
  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }

  DeclGroupRef getDeclGroup() { return DG; }
  const DeclGroupRef getDeclGroup() const { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // The declarations themselves, exposed as Stmt children so that generic
  // AST walkers visit their initializers.
  child_range children() {
    child_iterator Begin(DG.begin(), DG.end());
    child_iterator End(DG.end(), DG.end());
    return child_range(Begin, End);
  }

  const_child_range children() const {
    return const_child_range(const_cast<DeclStmt *>(this)->children());
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  /// Build a null statement whose semicolon sits at \p L.
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    setSemiLoc(L);
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }

  /// Whether the ";" was immediately preceded by an empty macro expansion.
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // The statement is exactly one token wide, so both bounds coincide.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children: both ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
// The child statements are stored as trailing Stmt* objects directly after
// this object; their count lives in CompoundStmtBits.NumStmts.
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
// Mutable iteration over the trailing Stmt* array.
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
// Const iteration over the same trailing array.
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
// Scan backwards for the last non-null-statement child.
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Intrusive singly-linked list of the cases in the enclosing switch.
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out of line below, once CaseStmt and DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
// Number of trailing Stmt*: LHS + substatement, plus RHS when present.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
// The ellipsis location is stored only for GNU range cases.
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
// Index computations into the trailing Stmt* array; the RHS slot, when
// present, sits between the LHS and the substatement.
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
// The LHS slot is always present; stored as Stmt* but is in fact an Expr*.
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
// The RHS slot exists only for GNU range cases; null otherwise.
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DefaultStmt - Represents the "default:" label of a switch statement.
class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  const Stmt *getSubStmt() const { return SubStmt; }
  Stmt *getSubStmt() { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  // The "default" keyword location is kept in the SwitchCase keyword slot.
  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // The single substatement is the only child.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
// Out-of-line so that CaseStmt and DefaultStmt are complete types here;
// SwitchCase has exactly these two subclasses.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Dispatch to the concrete subclass's substatement accessor.
Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // The identifier location lives in the shared bit-field storage.
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;

  const Stmt *getSubStmt() const { return SubStmt; }
  Stmt *getSubStmt() { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  // The labelled substatement is the only child.
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
// The attributes are stored as trailing objects; their count lives in
// AttributedStmtBits.NumAttrs.
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
// Empty shell: reserve NumAttrs slots, null-filled, for deserialization.
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
// The attributed substatement is the only child; attributes are not
// part of the child range.
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing
// objects at then end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
  // The 'if' keyword location lives in the shared Stmt bit-fields.
  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
  // The 'else' keyword location is a trailing SourceLocation that exists
  // only when the else branch has storage; otherwise an invalid (default)
  // SourceLocation is returned.
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }
  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }
  // True for 'if constexpr' statements.
  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
  bool isObjCAvailabilityCheck() const;
  // Source range: from the 'if' keyword to the end of the else branch if
  // present, otherwise to the end of the then branch.
  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  // Both ranges simply cover the whole trailing Stmt * array, so the
  // in-memory slot order (init, var, cond, then, else) is the child order.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;
  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;
  // SwitchStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  // Total trailing Stmt * slots: the two mandatory ones (condition, body)
  // plus one for each optional slot that was actually allocated.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }
  // Slot offsets within the trailing array; each later offset skips over
  // whichever optional slots precede it.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);
  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);
  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);
  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
  // The condition slot stores a Stmt * but is always an Expr *.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }
  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }
  // Optional C++17-style init statement; nullptr when not allocated.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  // Precondition: hasInitStorage().
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }
  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }
  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  // Head of the singly-linked list of case/default labels; cases are
  // prepended by addSwitchCase, so the list is in reverse source order.
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }
  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }
  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  // Falls back to the condition's end when there is no body (e.g. a
  // partially-built statement).
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }
  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;
  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };
  // Slot offsets: the condition slot shifts by one when the optional
  // condition-variable slot is allocated.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
  // Mandatory slots (condition, body) plus the optional variable slot.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }
  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);
  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);
  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }
  // The condition slot stores a Stmt * but is always an Expr *.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }
  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }
  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }
  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
  // Source range: from the 'while' keyword to the end of the body.
  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }
  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a do-while statement from its body, its controlling expression
  /// and the locations of 'do', 'while' and the closing ')'.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setBody(Body);
    setCond(Cond);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  /// The controlling expression, stored internally as a Stmt *.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<const Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  /// The loop body.
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // Keyword / punctuation locations. The 'do' location is kept in the
  // shared Stmt bit-fields; the others are plain members.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // Source range: from 'do' through the ')' that closes the condition.
  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators: the children are exactly the fixed SubExprs array.
  child_range children() { return child_range(SubExprs, SubExprs + END_EXPR); }
  const_child_range children() const {
    return const_child_range(SubExprs, SubExprs + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt *SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  // The init clause; may be an expression statement or a DeclStmt.
  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt *>(SubExprs[CONDVAR]);
  }

  // Condition and increment slots hold Expr * values stored as Stmt *.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }

  Expr *getInc() { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  const Expr *getInc() const { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt *>(E); }

  // The loop body; always present.
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // Keyword / parenthesis locations.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators: the children are exactly the fixed SubExprs array.
  child_range children() { return child_range(SubExprs, SubExprs + END_EXPR); }
  const_child_range children() const {
    return const_child_range(SubExprs, SubExprs + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Build a goto from the target label and the locations of the 'goto'
  /// keyword and of the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label) {
    setGotoLoc(GL);
    setLabelLoc(LL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location lives in the shared Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  /// Build an indirect goto from its target expression and the locations
  /// of the 'goto' keyword and of the '*'.
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass) {
    setGotoLoc(gotoLoc);
    setStarLoc(starLoc);
    setTarget(target);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // The 'goto' keyword location lives in the shared Stmt bit-fields.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }

  // The jump-target expression, stored internally as a Stmt *.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators: the single child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a continue statement from the location of the keyword.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location lives in the shared Stmt bit-fields; it is both
  // the begin and the end of the statement's source range.
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a break statement from the location of the keyword.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location lives in the shared Stmt bit-fields; it is both
  // the begin and the end of the statement's source range.
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;
  /// The return expression.
  Stmt *RetExpr;
  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().
  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
  // Either zero or one trailing VarDecl * slot.
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }
  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);
  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
  // The returned value, if any; stored internally as a Stmt *.
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }
  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
  // Source range: 'return' keyword through the end of the returned
  // expression, or just the keyword for a bare 'return;'.
  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }
  // Iterators: zero or one child depending on whether a value is returned.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;
  SourceLocation AsmLoc;
  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;
  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;
  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;
  // Operand expressions, owned/populated by the derived classes. Layout:
  // outputs first ([0, NumOutputs)), then inputs
  // ([NumOutputs, NumOutputs + NumInputs)).
  Stmt **Exprs = nullptr;
  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}
public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }
  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }
  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }
  // NOTE(review): these return an invalid (default) location; derived
  // classes shadow them with real ranges — confirm against derived classes.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
  //===--- Asm String Analysis ---===//
  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;
  //===--- Output operands ---===//
  unsigned getNumOutputs() const { return NumOutputs; }
  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand. All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;
  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }
  const Expr *getOutputExpr(unsigned i) const;
  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;
  //===--- Input operands ---===//
  unsigned getNumInputs() const { return NumInputs; }
  /// getInputConstraint - Return the specified input constraint. Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;
  const Expr *getInputExpr(unsigned i) const;
  //===--- Other ---===//
  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }
  // Input expr iterators. Inputs occupy the slice of Exprs immediately
  // after the outputs.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }
  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }
  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }
  // Output expr iterators. Outputs occupy the leading slice of Exprs.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }
  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }
  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }
  // Children cover outputs then inputs (labels, if any, are handled by
  // the derived classes' own storage).
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
//// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
  //===--- Labels ---===//
  // Label operands ("asm goto") follow the outputs and inputs in
  // Names/Exprs.
  bool isAsmGoto() const {
    return NumLabels > 0;
  }
  unsigned getNumLabels() const {
    return NumLabels;
  }
  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }
  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;
  // Iteration over the NumLabels label expressions stored in Exprs.
  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }
  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }
  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }
  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }
private:
  /// Install the operand name/constraint/expression, label, and clobber
  /// arrays after construction (copies are made inside the ASTContext C).
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);
public:
  //===--- Other ---===//
  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;
  StringRef getClobber(unsigned i) const;
  // Clobber string literal i (const and non-const forms).
  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;
  SourceLocation LBraceLoc, EndLoc;  // '{' of braced asm (invalid if unbraced); end of stmt.
  StringRef AsmStr;                  // The (already-transformed) asm string.
  unsigned NumAsmToks = 0;           // Number of tokens in AsmToks.
  Token *AsmToks = nullptr;          // Raw tokens of the asm body.
  StringRef *Constraints = nullptr;  // Outputs first, then inputs.
  StringRef *Clobbers = nullptr;     // NumClobbers clobber strings.
public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);
  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }
  /// True when the asm body was written inside braces.
  bool hasBraces() const { return LBraceLoc.isValid(); }
  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }
  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }
  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;
  //===--- Output operands ---===//
  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }
  Expr *getOutputExpr(unsigned i);
  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }
  //===--- Input operands ---===//
  // Inputs are stored after the NumOutputs outputs in Constraints/Exprs.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }
  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);
  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }
  //===--- Other ---===//
  /// All constraints, outputs first then inputs.
  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }
  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }
  /// All operand expressions, outputs first then inputs.
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }
  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
  /// Copy the given strings/tokens/expressions into C-allocated storage.
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }
  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a Windows SEH `__except` handler.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;
  SourceLocation Loc;  // Location of the `__except` keyword.
  Stmt *Children[2];   // Indexed by the enum below.
  enum { FILTER_EXPR, BLOCK };
  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);
  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
  /// The filter expression evaluated to decide whether to run the handler.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }
  /// The handler's compound-statement body.
  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }
  child_range children() {
    return child_range(Children, Children+2);
  }
  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a Windows SEH `__finally` block.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;
  SourceLocation Loc;  // Location of the `__finally` keyword.
  Stmt *Block;         // The compound-statement body.
  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);
  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }
  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
  child_range children() {
    return child_range(&Block,&Block+1);
  }
  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a Windows SEH `__try` statement with its handler
/// (either a `__except` or a `__finally`).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;
  bool IsCXXTry;         // true for 'try', false for '__try'.
  SourceLocation TryLoc;
  Stmt *Children[2];     // Indexed by the enum below.
  enum { TRY = 0, HANDLER = 1 };
  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);
  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
  bool getIsCXXTry() const { return IsCXXTry; }
  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }
  /// The handler: a SEHExceptStmt or a SEHFinallyStmt.
  Stmt *getHandler() const { return Children[HANDLER]; }
  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;
  child_range children() {
    return child_range(Children, Children+2);
  }
  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;  // Location of the `__leave` keyword.
public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }
  // Iterators
  // __leave has no sub-statements, so both ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };
  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Captured variable (null for 'this'/VLA captures) plus the capture kind
    // packed into the pointer's low bits.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;
  public:
    friend class ASTStmtReader;
    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);
    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;
    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }
    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }
    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }
    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }
    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };
private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;
  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;
  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
  // Trailing storage begins immediately after this object: NumCaptures
  // capture-initializer expressions followed by the captured statement.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }
  // The Capture objects stored after the trailing statements.
  Capture *getStoredCaptures() const;
  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
  friend class ASTStmtReader;
  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);
  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);
  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;
  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);
  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;
  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);
  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }
  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;
  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;
  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }
  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }
  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  // NOTE(review): const member returning a mutable iterator — this mirrors
  // getStoredCaptures() being const.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }
  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }
  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;
  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;
  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }
  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }
  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }
  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }
  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }
  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }
  // Source range delegates to the captured statement.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }
  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }
  child_range children();
  const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * The subtrahend *y is deliberately used as scratch space: its fields are
 * adjusted while normalizing the microsecond difference (classic glibc
 * "elapsed time" idiom), so callers must not rely on *y afterwards.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }

    /* Carry excess microseconds back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the microsecond part is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative difference iff x's (adjusted) seconds lag behind y's. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
convolution_sgemm_packnto1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_packnto1_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
// Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
Mat tmp;
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 4u * packn, packn, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 4u * packn, packn, opt.workspace_allocator);
{
int remain_size_start = 0;
int nn_size = size >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr[4] = img0[l + packn * 4];
tmpptr[5] = img0[l + packn * 5];
tmpptr[6] = img0[l + packn * 6];
tmpptr[7] = img0[l + packn * 7];
tmpptr += 8;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
vfloat32m1_t _val4 = vle32_v_f32m1(img0 + packn * 4, vl);
vfloat32m1_t _val5 = vle32_v_f32m1(img0 + packn * 5, vl);
vfloat32m1_t _val6 = vle32_v_f32m1(img0 + packn * 6, vl);
vfloat32m1_t _val7 = vle32_v_f32m1(img0 + packn * 7, vl);
vsseg8e32_v_f32m1x8(tmpptr, vcreate_f32m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
img0 += size * packn;
tmpptr += packn * 8;
#endif
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr[2] = img0[l + packn * 2];
tmpptr[3] = img0[l + packn * 3];
tmpptr += 4;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vfloat32m1_t _val2 = vle32_v_f32m1(img0 + packn * 2, vl);
vfloat32m1_t _val3 = vle32_v_f32m1(img0 + packn * 3, vl);
vsseg4e32_v_f32m1x4(tmpptr, vcreate_f32m1x4(_val0, _val1, _val2, _val3), vl);
img0 += size * packn;
tmpptr += packn * 4;
#endif
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
#if C906
for (int l = 0; l < packn; l++)
{
tmpptr[0] = img0[l];
tmpptr[1] = img0[l + packn];
tmpptr += 2;
}
img0 += size * packn;
#else
vfloat32m1_t _val0 = vle32_v_f32m1(img0, vl);
vfloat32m1_t _val1 = vle32_v_f32m1(img0 + packn, vl);
vsseg2e32_v_f32m1x2(tmpptr, vcreate_f32m1x2(_val0, _val1), vl);
img0 += size * packn;
tmpptr += packn * 2;
#endif
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * packn;
for (int k = 0; k < maxk; k++)
{
vfloat32m1_t _val = vle32_v_f32m1(img0, vl);
vse32_v_f32m1(tmpptr, _val, vl);
img0 += size * packn;
tmpptr += packn;
}
}
}
}
int nn_outch = outch / packn;
int remain_outch_start = nn_outch * packn;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * packn;
float* outptr0 = top_blob.channel(p);
const float zeros[packn] = {0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum2 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum3 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum4 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum5 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum6 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum7 = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
float val2 = *tmpptr++;
float val3 = *tmpptr++;
float val4 = *tmpptr++;
float val5 = *tmpptr++;
float val6 = *tmpptr++;
float val7 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
_sum4 = vfmacc_vf_f32m1(_sum4, val4, _w0, vl);
_sum5 = vfmacc_vf_f32m1(_sum5, val5, _w0, vl);
_sum6 = vfmacc_vf_f32m1(_sum6, val6, _w0, vl);
_sum7 = vfmacc_vf_f32m1(_sum7, val7, _w0, vl);
kptr0 += packn;
}
#if C906
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum0, vl);
vsse32_v_f32m1(outptr0 + 1, top_blob.cstep * sizeof(float), _sum1, vl);
vsse32_v_f32m1(outptr0 + 2, top_blob.cstep * sizeof(float), _sum2, vl);
vsse32_v_f32m1(outptr0 + 3, top_blob.cstep * sizeof(float), _sum3, vl);
vsse32_v_f32m1(outptr0 + 4, top_blob.cstep * sizeof(float), _sum4, vl);
vsse32_v_f32m1(outptr0 + 5, top_blob.cstep * sizeof(float), _sum5, vl);
vsse32_v_f32m1(outptr0 + 6, top_blob.cstep * sizeof(float), _sum6, vl);
vsse32_v_f32m1(outptr0 + 7, top_blob.cstep * sizeof(float), _sum7, vl);
#else
vssseg8e32_v_f32m1x8(outptr0, top_blob.cstep * sizeof(float), vcreate_f32m1x8(_sum0, _sum1, _sum2, _sum3, _sum4, _sum5, _sum6, _sum7), vl);
#endif
outptr0 += 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum2 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum3 = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
float val2 = *tmpptr++;
float val3 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
_sum2 = vfmacc_vf_f32m1(_sum2, val2, _w0, vl);
_sum3 = vfmacc_vf_f32m1(_sum3, val3, _w0, vl);
kptr0 += packn;
}
#if C906
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum0, vl);
vsse32_v_f32m1(outptr0 + 1, top_blob.cstep * sizeof(float), _sum1, vl);
vsse32_v_f32m1(outptr0 + 2, top_blob.cstep * sizeof(float), _sum2, vl);
vsse32_v_f32m1(outptr0 + 3, top_blob.cstep * sizeof(float), _sum3, vl);
#else
vssseg4e32_v_f32m1x4(outptr0, top_blob.cstep * sizeof(float), vcreate_f32m1x4(_sum0, _sum1, _sum2, _sum3), vl);
#endif
outptr0 += 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum0 = vle32_v_f32m1(biasptr, vl);
vfloat32m1_t _sum1 = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val0 = *tmpptr++;
float val1 = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vf_f32m1(_sum0, val0, _w0, vl);
_sum1 = vfmacc_vf_f32m1(_sum1, val1, _w0, vl);
kptr0 += packn;
}
#if C906
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum0, vl);
vsse32_v_f32m1(outptr0 + 1, top_blob.cstep * sizeof(float), _sum1, vl);
#else
vssseg2e32_v_f32m1x2(outptr0, top_blob.cstep * sizeof(float), vcreate_f32m1x2(_sum0, _sum1), vl);
#endif
outptr0 += 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = kernel.channel(p / packn);
int nn = inch * maxk * packn; // inch always > 0
vfloat32m1_t _sum = vle32_v_f32m1(biasptr, vl);
for (int j = 0; j < nn; j++)
{
float val = *tmpptr++;
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum = vfmacc_vf_f32m1(_sum, val, _w0, vl);
kptr0 += packn;
}
vsse32_v_f32m1(outptr0, top_blob.cstep * sizeof(float), _sum, vl);
outptr0 += 1;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
float sum4 = bias0;
float sum5 = bias0;
float sum6 = bias0;
float sum7 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum4 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum5 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum6 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum7 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1x8_t _val01 = vlseg8e32_v_f32m1x8(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, vget_f32m1x8_f32m1(_val01, 0), _w0, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, vget_f32m1x8_f32m1(_val01, 1), _w0, vl);
_sum2 = vfmacc_vv_f32m1(_sum2, vget_f32m1x8_f32m1(_val01, 2), _w0, vl);
_sum3 = vfmacc_vv_f32m1(_sum3, vget_f32m1x8_f32m1(_val01, 3), _w0, vl);
_sum4 = vfmacc_vv_f32m1(_sum4, vget_f32m1x8_f32m1(_val01, 4), _w0, vl);
_sum5 = vfmacc_vv_f32m1(_sum5, vget_f32m1x8_f32m1(_val01, 5), _w0, vl);
_sum6 = vfmacc_vv_f32m1(_sum6, vget_f32m1x8_f32m1(_val01, 6), _w0, vl);
_sum7 = vfmacc_vv_f32m1(_sum7, vget_f32m1x8_f32m1(_val01, 7), _w0, vl);
tmpptr += packn * 8;
kptr0 += packn;
}
#if C906
// TODO
std::vector<float> ss0(packn);
std::vector<float> ss1(packn);
std::vector<float> ss2(packn);
std::vector<float> ss3(packn);
std::vector<float> ss4(packn);
std::vector<float> ss5(packn);
std::vector<float> ss6(packn);
std::vector<float> ss7(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
vse32_v_f32m1((float*)ss1.data(), _sum1, vl);
vse32_v_f32m1((float*)ss2.data(), _sum2, vl);
vse32_v_f32m1((float*)ss3.data(), _sum3, vl);
vse32_v_f32m1((float*)ss4.data(), _sum4, vl);
vse32_v_f32m1((float*)ss5.data(), _sum5, vl);
vse32_v_f32m1((float*)ss6.data(), _sum6, vl);
vse32_v_f32m1((float*)ss7.data(), _sum7, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
sum1 += ss1[i];
sum2 += ss2[i];
sum3 += ss3[i];
sum4 += ss4[i];
sum5 += ss5[i];
sum6 += ss6[i];
sum7 += ss7[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
sum1 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum1, vfmv_s_f_f32m1(vfloat32m1_t(), sum1, vl), vl));
sum2 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum2, vfmv_s_f_f32m1(vfloat32m1_t(), sum2, vl), vl));
sum3 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum3, vfmv_s_f_f32m1(vfloat32m1_t(), sum3, vl), vl));
sum4 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum4, vfmv_s_f_f32m1(vfloat32m1_t(), sum4, vl), vl));
sum5 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum5, vfmv_s_f_f32m1(vfloat32m1_t(), sum5, vl), vl));
sum6 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum6, vfmv_s_f_f32m1(vfloat32m1_t(), sum6, vl), vl));
sum7 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum7, vfmv_s_f_f32m1(vfloat32m1_t(), sum7, vl), vl));
#endif
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum2 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum3 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1x4_t _val01 = vlseg4e32_v_f32m1x4(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, vget_f32m1x4_f32m1(_val01, 0), _w0, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, vget_f32m1x4_f32m1(_val01, 1), _w0, vl);
_sum2 = vfmacc_vv_f32m1(_sum2, vget_f32m1x4_f32m1(_val01, 2), _w0, vl);
_sum3 = vfmacc_vv_f32m1(_sum3, vget_f32m1x4_f32m1(_val01, 3), _w0, vl);
tmpptr += packn * 4;
kptr0 += packn;
}
#if C906
// TODO
std::vector<float> ss0(packn);
std::vector<float> ss1(packn);
std::vector<float> ss2(packn);
std::vector<float> ss3(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
vse32_v_f32m1((float*)ss1.data(), _sum1, vl);
vse32_v_f32m1((float*)ss2.data(), _sum2, vl);
vse32_v_f32m1((float*)ss3.data(), _sum3, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
sum1 += ss1[i];
sum2 += ss2[i];
sum3 += ss3[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
sum1 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum1, vfmv_s_f_f32m1(vfloat32m1_t(), sum1, vl), vl));
sum2 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum2, vfmv_s_f_f32m1(vfloat32m1_t(), sum2, vl), vl));
sum3 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum3, vfmv_s_f_f32m1(vfloat32m1_t(), sum3, vl), vl));
#endif
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
float sum1 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
vfloat32m1_t _sum1 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1x2_t _val01 = vlseg2e32_v_f32m1x2(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, vget_f32m1x2_f32m1(_val01, 0), _w0, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, vget_f32m1x2_f32m1(_val01, 1), _w0, vl);
tmpptr += packn * 2;
kptr0 += packn;
}
#if C906
// TODO
std::vector<float> ss0(packn);
std::vector<float> ss1(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
vse32_v_f32m1((float*)ss1.data(), _sum1, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
sum1 += ss1[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
sum1 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum1, vfmv_s_f_f32m1(vfloat32m1_t(), sum1, vl), vl));
#endif
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0 += 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = kernel.channel(p / packn + p % packn);
int nn = inch * maxk; // inch always > 0
float sum0 = bias0;
vfloat32m1_t _sum0 = vfmv_v_f_f32m1(0.f, vl);
for (int j = 0; j < nn; j++)
{
vfloat32m1_t _val0 = vle32_v_f32m1(tmpptr, vl);
vfloat32m1_t _w0 = vle32_v_f32m1(kptr0, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _val0, _w0, vl);
tmpptr += packn;
kptr0 += packn;
}
#if C906
// TODO
std::vector<float> ss0(packn);
vse32_v_f32m1((float*)ss0.data(), _sum0, vl);
for (int i = 0; i < packn; i++)
{
sum0 += ss0[i];
}
#else
sum0 = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m1_f32m1(vfloat32m1_t(), _sum0, vfmv_s_f_f32m1(vfloat32m1_t(), sum0, vl), vl));
#endif
outptr0[0] = sum0;
outptr0 += 1;
}
}
}
// Repack convolution weights for the packn-to-1 im2col/SGEMM path.
// src layout: maxk-inch-outch; dst layout: pb-pa-maxk-inch/pa-outch/pb,
// i.e. weights are interleaved so the GEMM inner loop can stream packn
// input channels with unit-stride loads.
static void convolution_im2col_sgemm_transform_kernel_packnto1_rvv(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    // packn = number of 32-bit lanes in one vector register (VLEN bytes / 4)
    const int packn = csrr_vlenb() / 4;
    const int maxk = kernel_w * kernel_h;
    // interleave
    // src = maxk-inch-outch
    // dst = pb-pa-maxk-inch/pa-outch/pb
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    // one dst channel per full group of packn output channels, plus one per leftover
    kernel_tm.create(packn * packn * maxk, inch / packn, outch / packn + outch % packn);
    // NOTE(review): only full packn groups of input channels are copied —
    // presumably callers guarantee inch % packn == 0; confirm.
    int q = 0;
    // full groups of packn output channels
    for (; q + (packn - 1) < outch; q += packn)
    {
        float* g00 = kernel_tm.channel(q / packn);
        for (int p = 0; p + (packn - 1) < inch; p += packn)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel.channel(q + j).row(p + i);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
    }
    // remaining output channels, handled one at a time
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);
        float* g00 = kernel_tm.channel(q / packn + q % packn);
        for (int p = 0; p + (packn - 1) < inch; p += packn)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < packn; j++)
                {
                    const float* k00 = k0.row(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}
// Convolution via im2col + SGEMM where the input blob is packn-packed and the
// output blob is plain (pack1).  The im2col stage expands every kernel tap of
// every input channel into a contiguous row, then the packed GEMM does the
// multiply-accumulate against the pre-transformed kernel.
static void convolution_im2col_sgemm_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    // packn = number of 32-bit lanes in one vector register (VLEN bytes / 4)
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;
    // im2col: (size x maxk x inch) matrix of packn-wide elements
    Mat bottom_im2col(size, maxk, inch, 4u * packn, packn, opt.workspace_allocator);
    {
        // packed elements to skip from the end of one output row to the
        // start of the next input row (stride_h rows down, rewound to col 0)
        const int gap = (w * stride_h - outw * stride_w) * packn;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // top-left input sample touched by kernel tap (u, v)
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * packn;
                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            // copy one packn-wide pixel into the im2col matrix
                            vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
                            vse32_v_f32m1(ptr, _val, vl);
                            sptr += stride_w * packn;
                            ptr += packn;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_packnto1_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
axpy_double.c | //axpy.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 1200
// read timer in second
// Read a wall-clock timestamp in seconds.
// Replaces the obsolete ftime()/<sys/timeb.h>, whose 1 ms granularity is too
// coarse for benchmarking, with the standard C11 timespec_get() which gives
// nanosecond resolution (epoch-based, so differences work exactly as before).
double read_timer() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec + (double) ts.tv_nsec * 1e-9;
}
//Create a matrix and a vector and fill with random numbers
// Fill both length-N vectors with pseudo-random values in [0, 10].
void init(double *X, double *Y) {
    const double scale = (double)(RAND_MAX / 10.0);
    for (int k = 0; k < N; ++k) {
        X[k] = (double)rand() / scale;
        Y[k] = (double)rand() / scale;
    }
}
//Our sum function- what it does is pretty straight-forward.
// Y += a * X over N elements; the simd pragma asks the compiler to vectorize.
void axpy(double *X, double *Y, double a) {
#pragma omp simd
    for (int idx = 0; idx < N; ++idx) {
        Y[idx] = Y[idx] + a * X[idx];
    }
}
// Debug functions
// Scalar reference AXPY used to validate the SIMD version.
void axpy_serial(double *X, double *Y, double a) {
    for (int k = 0; k < N; ++k)
        Y[k] += a * X[k];
}
// Print the first 8 entries of a vector as "[a b c ... ]".
void print_vector(double *vector) {
    printf("[");
    for (int k = 0; k < 8; ++k)
        printf("%.2f ", vector[k]);
    puts("]");
}
// Sum of absolute element-wise differences between A and B.
// The original summed *signed* differences, which can cancel and report ~0
// even when the vectors disagree; fabs makes the correctness check meaningful.
double check(double *A, double *B) {
    double difference = 0.0;
    for (int i = 0; i < N; ++i) {
        difference += fabs(A[i] - B[i]);
    }
    return difference;
}
// Benchmark driver: times N_RUNS AXPY calls for the SIMD and serial kernels
// and compares their results.
int main(int argc, char **argv) {
    // Y_serial holds a copy updated by the scalar kernel so the SIMD result
    // can be validated against it.
    double *X = malloc(sizeof(double) * N);
    double *Y = malloc(sizeof(double) * N);
    double *Y_serial = malloc(sizeof(double) * N);
    if (!X || !Y || !Y_serial) {
        fprintf(stderr, "allocation failed\n");
        free(X);
        free(Y);
        free(Y_serial);
        return 1;
    }
    double a = 3.14;
    srand(time(NULL));
    init(X, Y);
    for (int i = 0; i < N; i++) Y_serial[i] = Y[i];
    print_vector(Y);
    print_vector(X);
    printf("%.2f\n", a);
    puts("=\n");
    double start = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        axpy(X, Y, a);
    double t = (read_timer() - start);
    double start_serial = read_timer();
    for (int i = 0; i < N_RUNS; i++)
        axpy_serial(X, Y_serial, a);
    double t_serial = (read_timer() - start_serial);
    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);
    // AXPY does 2 flops per element (one multiply, one add), so the total
    // work is 2*N*N_RUNS flops.  The original formula multiplied by an
    // extra factor of N, overstating GFLOPS by ~1000x.
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %f\n", check(Y, Y_serial));
    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
|
reorder.c | //
// reorder.c
//
// Created by Hussian Alamri on November 2013
//
#include <stdio.h>
#include <stdlib.h>
#include "reorder.h"
int index_gc = 0;
int P[20000] = {0};
/* Greedy column reordering: starting from column 0, repeatedly append the
 * not-yet-chosen column whose nonzero pattern shares the most rows with the
 * most recently chosen column.  The permutation is written to the global P.
 *
 * input:  original sparse matrix (nrows x ncols) - not modified
 * output: unused here; kept for interface compatibility with callers */
void column_intersection(double** input, double** output, int nrows, int ncols) {
    int i, j, ijk, max_pos;
    (void)output; /* the actual reordering is applied elsewhere via P */
    int *intersection = (int *)malloc(ncols * sizeof(int));
    if (intersection == NULL) {
        return; /* out of memory: leave P unchanged */
    }
    for (j = 0; j < ncols; ++j) {
        intersection[j] = 0;
    }
    ijk = 0;
    P[ijk] = 0; // choice of 1st column effects resulting reorder
    while (ijk < ncols - 1) {
        /* For every column j, count rows where both column j and the
         * previously selected column P[ijk] are nonzero.  Each thread owns
         * a distinct j, so the increments do not race. */
        #pragma omp parallel for default(shared) private(j, i)
        for (j = 0; j < ncols; ++j) {
            for (i = 0; i < nrows; ++i) {
                if (input[i][j] != 0 && input[i][P[ijk]] != 0) {
                    intersection[j]++;
                }
            }
        }
        max_pos = findMax(intersection, ncols);
        /* BUG FIX: the original ran this selection inside an OpenMP
         * parallel-for whose body never used the loop index and mutated
         * max_pos / intersection from every thread concurrently - a data
         * race that could select an already-used column.  The intent is a
         * sequential "skip columns already in P" scan, which terminates
         * after at most ncols invalidations. */
        while (exists(P, ncols, max_pos)) {
            intersection[max_pos] = -1;
            max_pos = findMax(intersection, ncols);
        }
        ijk++;
        P[ijk] = max_pos;
        /* reset intersection weights for the next round */
        #pragma omp parallel for default(shared) private(j)
        for (j = 0; j < ncols; ++j) {
            intersection[j] = 0;
        }
    } // while-loop done
    free(intersection);
}
/* Recursively partition the column index set C (length ncols) row by row:
 * columns with a nonzero at rowIndex go to C1, the rest to C2.  `sign`
 * alternates the visit order of the two halves (gray-code style), and leaf
 * calls append their columns to the global permutation P via index_gc.
 * A is not modified; the C array is owned by the caller. */
void gray_code(double** A, int* C, int rowIndex, int sign, int nrows, int ncols) {
    int ijk, jj, ii, i1, i2;
    int *C1 = (int*)malloc(ncols * sizeof(int));
    /* C1: indices of columns with a nonzero at rowIndex */
    int *C2 = (int*)malloc(ncols * sizeof(int));
    /* C2: indices of columns with a zero at rowIndex */
    if (C1 == NULL || C2 == NULL) {
        free(C1);
        free(C2);
        return; /* out of memory: leave P as-is */
    }
    i1 = 0;
    i2 = 0;
#ifdef LOG
    printf("ncols %d, nrows %d, rowIndex %d, sign %d\n", ncols, nrows, rowIndex, sign);
#endif
    /* base case: all rows consumed or nothing left to split - emit order */
    if (rowIndex == nrows || ncols <= 1) {
        for (jj = 0; jj < ncols; ++jj) {
            P[index_gc] = C[jj];
            index_gc++;
        }
        free(C1);
        free(C2);
        return;
    }
    for (ijk = 0; ijk < ncols; ++ijk) {
        C1[ijk] = -1;
        C2[ijk] = -1;
    }
    /* partition C into disjoint sets: C1 gets columns with a nonzero at
     * rowIndex, C2 gets the rest */
    for (ii = 0; ii < ncols; ++ii) {
        if (A[rowIndex][C[ii]] != 0) {
            C1[i1] = C[ii];
            i1++;
        } else {
            C2[i2] = C[ii];
            i2++;
        }
    }
    rowIndex++;
    /* i1 / i2 are the populated lengths passed as ncols to the children */
    if (sign == +1) {
        gray_code(A, C1, rowIndex, -1, nrows, i1);
        gray_code(A, C2, rowIndex, +1, nrows, i2);
    } else {
        gray_code(A, C2, rowIndex, -1, nrows, i2);
        gray_code(A, C1, rowIndex, +1, nrows, i1);
    }
    /* BUG FIX: the original leaked C1/C2 on every non-base-case call.
     * Children never free their C parameter, so freeing here is safe. */
    free(C1);
    free(C2);
}
|
easyopt.c | /*
This implementation does some easy optimization on the baseline code.
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define L1 2000
#define L2 2000
#define LT 100
#define SEED 0
#define LS (L1 * L2)
char world[L1 + 2][L2 + 2];
char result[L1 + 2][L2 + 2];
// Current wall-clock time in seconds (microsecond resolution).
double get_walltime() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// Conway's Game of Life on an L1 x L2 torus for LT generations, parallelised
// with OpenMP.  The world array has a one-cell halo used for the periodic
// boundary copies.
int main(int argc, char *argv[]) {
    int i, j, t;
    int population0, population;
    double time;
    // initialize data: random 0/1 cells in the grid interior
    srand(SEED);
    population0 = 0;
    for (i = 0; i < L1; i++) {
        for (j = 0; j < L2; j++) {
            world[i + 1][j + 1] = rand() % 2;
            if (world[i + 1][j + 1] == 1) population0++;
        }
    }
    time = get_walltime();
    for (t = 0; t < LT; t++) {
        // refresh the periodic (wrap-around) halo rows/columns
#pragma omp parallel for
        for (i = 1; i <= L1; i++) {
            world[i][0] = world[i][L2];
            world[i][L2 + 1] = world[i][1];
        }
#pragma omp parallel for
        for (j = 1; j <= L2; j++) {
            world[0][j] = world[L1][j];
            world[L1 + 1][j] = world[1][j];
        }
        world[0][0] = world[L1][L2];
        world[0][L2 + 1] = world[L1][1];
        world[L1 + 1][0] = world[1][L2];
        world[L1 + 1][L2 + 1] = world[1][1];
        // One generation: count the 8 neighbors and apply the B3/S23 rule.
        // BUG FIX: `neighbors` was declared at function scope and therefore
        // shared between threads in this parallel region - a data race that
        // could corrupt cell updates.  Declaring it inside the loop body
        // makes it private to each iteration.
#pragma omp parallel for collapse(2)
        for (i = 1; i <= L1; i++) {
            for (j = 1; j <= L2; j++) {
                int neighbors = world[i - 1][j - 1] + world[i - 1][j] +
                                world[i - 1][j + 1] + world[i][j - 1] +
                                world[i][j + 1] + world[i + 1][j - 1] +
                                world[i + 1][j] + world[i + 1][j + 1];
                result[i][j] =
                    (neighbors == 3 || (neighbors == 2 && (world[i][j] != 0)));
            }
        }
        memcpy(world, result, sizeof(world));
    }
    population = 0;
#pragma omp parallel for collapse(2) reduction(+:population)
    for (i = 1; i <= L1; i++) {
        for (j = 1; j <= L2; j++) {
            population += world[i][j];
        }
    }
    time = get_walltime() - time;
    // report results
    printf("World size: %d x %d, total generations: %d\n", L1, L2, LT);
    printf("Population is changed from %d to %d\n", population0, population);
    printf("Max Threads: %d\n", omp_get_max_threads());
    printf("Wall time: %f\n", time);
    return 0;
}
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda/utils.h"
using mshadow::red::limits::MinValue;
namespace mxnet {
namespace op {
namespace mxnet_op {
// Forward softmax element transform: out = exp(a) / sum, with the division
// carried out in the accumulation type AType.
struct softmax_fwd {
  template <typename AType>
  MSHADOW_XINLINE static AType Map(double logit, AType denom) {
    return AType(exp(logit) / denom);
  }
  template <typename AType>
  MSHADOW_XINLINE static AType Map(float logit, AType denom) {
    return AType(expf(logit) / denom);
  }
};
// Forward log-softmax element transform: out = a - log(sum).
struct log_softmax_fwd {
  template <typename DType>
  MSHADOW_XINLINE static double Map(DType logit, double denom) {
    return logit - log(denom);
  }
  template <typename DType>
  MSHADOW_XINLINE static float Map(DType logit, float denom) {
    return logit - logf(denom);
  }
};
// CPU softmax along `axis` of an ndim-tensor, numerically stabilised by
// subtracting the per-row maximum before exponentiation.
// OP selects the element transform (softmax_fwd / log_softmax_fwd);
// `negate` flips the sign of every input first; AType is the accumulation
// type.  `length`, when non-null, gives a per-row valid length: positions
// past it are written as 0 and excluded from the reduction.
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const DType temperature) {
  index_t M = shape[axis];          // reduction length along the softmax axis
  if (M == 0) return;
  index_t N = shape.Size()/M;       // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;                 // shape with the softmax axis collapsed
  index_t sa = stride[axis];        // element stride along the softmax axis
  if (length == nullptr) {
    // Dense path: every row uses the full length M.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      // Pass 1: row maximum (for numerical stability).
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < M; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        // Pass 2: sum of exp; Pass 3: write normalised outputs.
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  } else {
    // Variable-length path: each row i is valid up to length[i]; the tail
    // [len, M) is zero-filled and excluded from max/sum.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t len = static_cast<index_t>(length[i]);
      index_t base = unravel_dot(i, sshape, stride);
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < len; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      for (index_t j = len; j < M; ++j) {
        out[base + j*sa] = OType(0.0f);
      }
      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  }
}
// Elementwise select kernel: out[id] = cond[mask_id] ? x[id] : y, where the
// condition tensor may be broadcast against the data shape (mask dimensions
// of size 1 broadcast over the corresponding data dimension).
struct masked_softmax_where {
  template<typename DType, int ndim>
  MSHADOW_XINLINE static void Map(index_t id, DType* out, const bool* cond,
                                  const DType* x, const double y,
                                  Shape<ndim> data_shape, Shape<ndim> mask_shape) {
    // Convert the flat data index `id` into the matching flat index into the
    // (possibly broadcast) mask, walking dimensions from innermost to
    // outermost; broadcast dims (mask_shape[i] == 1) contribute no offset.
    index_t mask_pos = 0;
    index_t stride = 1;
    for (index_t i = ndim-1, j = id; i >=0; --i) {
      auto tmp = j / data_shape[i];
      if (mask_shape[i] != 1) {
        mask_pos += (j - tmp * mask_shape[i]) * stride;
      }
      stride *= mask_shape[i];
      j = tmp;
    }
    KERNEL_ASSIGN(out[id], kWriteTo, (cond[mask_pos] ? x[id] : static_cast<DType>(y)));
  }
};
// Masked softmax on CPU: masked-out inputs are first replaced by the most
// negative representable DType so they contribute ~0 to the reduction, a
// plain Softmax runs over the scratch buffer, and finally masked positions
// in the output are stamped with -inf (masked_neg_inf) or 0.
template<typename OP, bool masked_neg_inf, bool negate,
         typename AType, typename DType, int ndim>
inline void MaskedSoftmax(Stream<cpu> *s, DType *in, DType *out, bool *mask,
                          Shape<ndim> data_shape, Shape<ndim> mask_shape,
                          int axis, const double temperature, bool normalize,
                          const OpContext& ctx) {
  // Temporary buffer holding the mask-filled copy of the input.
  Tensor<cpu, 1, DType> workspace =
      ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s);
  DType* scratch = TBlob(workspace).dptr<DType>();
  const double fill_min = MinValue<DType>();
  Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), scratch,
                                            mask, in, fill_min, data_shape,
                                            mask_shape);
  // No per-row valid lengths for the masked variant.
  int* no_lengths = nullptr;
  Softmax<OP, negate, AType, DType>(s, scratch, out, no_lengths,
                                    data_shape, axis, temperature);
  const double masked_fill = masked_neg_inf ? -INFINITY : 0.0;
  Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), out, mask,
                                            out, masked_fill, data_shape,
                                            mask_shape);
}
// Backward softmax element transform: igrad = out * (ograd - sum).
struct softmax_bwd {
  template<typename DType, typename AType>
  MSHADOW_XINLINE static AType Map(DType grad_out, DType y, AType sum) {
    return AType((grad_out - sum) * y);
  }
};
// Backward log-softmax element transform: igrad = ograd - exp(out) * sum.
struct log_softmax_bwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(double grad_out, double y, AType sum) {
    return AType(grad_out - sum * exp(y));
  }
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float grad_out, float y, AType sum) {
    return AType(grad_out - sum * expf(y));
  }
};
// CPU backward pass for (log_)softmax along `axis`.
// OP1 accumulates the per-row reduction term (sum over ograd*out or ograd),
// OP2 combines it with each element; Req selects the write mode via
// KERNEL_ASSIGN.  `length`, when non-null, restricts the reduction to the
// first length[i] entries of each row and zeroes gradients past it.
template<typename OP1, typename OP2, int Req, bool negate,
         typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];          // reduction length along the softmax axis
  if (M == 0) return;
  index_t N = shape.Size()/M;       // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];        // element stride along the softmax axis
  if (length != nullptr) {
    // Variable-length path.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);
      // Per-row reduction over the valid prefix only.
      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          // Zero the gradient past the valid length.
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    // Dense path: full row length M everywhere.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}
// Backward of the masked softmax: the output gradient is zeroed at masked
// positions, the regular SoftmaxGrad runs over the result, then the input
// gradient is zeroed at masked positions as well.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType>
inline void MaskedSoftmaxGrad(Stream<cpu> *s, DType *out, DType *ograd,
                              DType *igrad, bool *mask, Shape<ndim> data_shape,
                              Shape<ndim> mask_shape, int axis,
                              const double temperature,
                              const OpContext& ctx) {
  // Temporary buffer holding the mask-filtered output gradient.
  Tensor<cpu, 1, DType> workspace =
      ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s);
  DType* grad_scratch = TBlob(workspace).dptr<DType>();
  Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), grad_scratch,
                                            mask, ograd, 0.0, data_shape,
                                            mask_shape);
  // No per-row valid lengths for the masked variant.
  int* no_lengths = nullptr;
  SoftmaxGrad<OP1, OP2, Req, negate, AType, DType, DType, int, ndim>(
      s, out, grad_scratch, igrad, no_lengths, data_shape, axis, temperature);
  Kernel<masked_softmax_where, cpu>::Launch(s, data_shape.Size(), igrad, mask,
                                            igrad, 0.0, data_shape, mask_shape);
}
#ifdef __CUDACC__
// CUDA softmax kernel: one thread block per row, x_size = 2^x_bits threads.
// Shared memory smem is reused for both block reductions (max, then sum);
// the __syncthreads() pairs around the smem[0] reads guard that reuse.
// Rows with a per-row length get 0 written past position len.
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
                                       index_t M, int axis, Shape<ndim> sshape,
                                       Shape<ndim> stride, const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];                       // stride along softmax axis
  index_t base = unravel_dot(blockIdx.x, sshape, stride);  // this row's start
  index_t x = threadIdx.x;
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
  // Block reduction 1: maximum over the valid prefix (numerical stability).
  red::maximum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::maximum, x_bits>(smem);
  __syncthreads();
  DType smax = smem[0];
  __syncthreads();
  // Block reduction 2: sum of exp((val - max) / temperature).
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < len; i += x_size) {
    val = negate ? -in[base + i*sa]:in[base + i*sa];
    smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Write outputs; positions past len (variable-length rows) become 0.
  for (index_t i = x; i < M; i += x_size) {
    val = negate ? -in[base + i*sa] : in[base + i*sa];
    out[base + i*sa] =
      (i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
  }
}
// Threads per block shared by the stride-1 softmax kernels below.
const int softmax_threads_per_block = 512;
// Stride-1 (contiguous-row) softmax kernel.  Several rows share one block
// (rows_per_block each served by threads_per_row threads); whole rows are
// staged through a fixed 20 KB shared-memory buffer using wide LType loads,
// reduced in-place, and written back with the same wide stores.
template<typename OP, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
                                               const index_t M, const double temperature,
                                               const int rows_per_block, const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  size_t base = my_row * row_length;
  // Stage this row into shared memory with vectorised loads.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
  __syncthreads();
  // Reduction 1: row maximum over the valid prefix (numerical stability).
  DType my_max_value;
  red::maximum::SetInitValue(my_max_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
  }
  scratch[threadIdx.x] = my_max_value;
  __syncthreads();
  // Tree-reduce down to one warp, then finish with warp shuffles.
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return ::max(x, y); });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  // Every thread of this row reads the row's slot 0 in scratch.
  DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Reduction 2: sum of exp((val - max) / temperature), same reduction shape.
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Transform the row in shared memory; positions past len become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                         DType(0.0f);
  }
  __syncthreads();
  // Write the finished row back with vectorised stores.
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}
// Convert a flat row index `idx` (in the axis-collapsed data shape) into the
// matching flat base offset into the broadcastable mask tensor, skipping the
// softmax axis; *stride_axis receives the mask's element stride along `axis`
// (0 contribution from broadcast dims of size 1).
template<int ndim>
MSHADOW_XINLINE index_t get_mask_position(const index_t idx, const Shape<ndim>& data_shape,
                                          const Shape<ndim>& mask_shape, int axis, index_t* stride_axis) {
  index_t ret = 0;
  index_t stride = 1;
  *stride_axis = 1;
  #pragma unroll
  // Walk dimensions innermost-to-outermost, peeling one coordinate of idx
  // per step; broadcast mask dims (size 1) add no offset.
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / data_shape[i];
    if (i != axis && mask_shape[i] != 1) {
      ret += (j - tmp * mask_shape[i]) * stride;
      // Dimensions inner to `axis` contribute to the mask stride along axis.
      if (i > axis)
        *stride_axis *= mask_shape[i];
    }
    stride *= mask_shape[i];
    j = tmp;
  }
  return ret;
}
// CUDA masked softmax: one block (x_size = 2^x_bits threads) per row.
// Only positions whose mask is true participate in the max/sum reductions;
// masked positions receive 0, or -inf when masked_neg_inf.  When `normalize`
// is false the max-subtraction stabilisation step is skipped (smax stays 0).
// Shared memory smem is reused for both block reductions.
template<bool normalize, int x_bits, typename OP, bool masked_neg_inf,
         bool negate, typename AType, int ndim, typename DType>
__global__ void masked_softmax_kernel(DType *in, DType *out, bool *in_mask,
                                      index_t M, int axis, Shape<ndim> sshape,
                                      Shape<ndim> stride, Shape<ndim> mask_shape,
                                      const double temperature) {
  extern __shared__ double shared[];
  AType* smem = reinterpret_cast<AType*>(shared);  // x_size
  const unsigned x_size = 1 << x_bits;
  index_t sa = stride[axis];                       // stride along softmax axis
  index_t base = unravel_dot(blockIdx.x, sshape, stride);  // this row's start
  index_t sa_mask = 0;
  index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask);
  // When the mask broadcasts along the softmax axis, one mask bit covers the row.
  bool bcst_mask_axis = (mask_shape[axis] == 1);
  index_t x = threadIdx.x;
  DType smax = 0.0;
  if (normalize) {
    // Block reduction 1: maximum over unmasked entries.
    red::maximum::SetInitValue(smem[x]);
    for (index_t i = x; i < M; i += x_size) {
      bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i*sa_mask];
      if (mask_value)
        smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
    }
    __syncthreads();
    cuda::Reduce1D<red::maximum, x_bits>(smem);
    __syncthreads();
    smax = smem[0];
    __syncthreads();
  }
  // Block reduction 2: sum of exp over unmasked entries.
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < M; i += x_size) {
    bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i*sa_mask];
    if (mask_value) {
      val = (negate ? -in[base + i*sa]:in[base + i*sa]);
      smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
    }
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Fill value for masked positions.
  double masked_value = 0.0;
  if (masked_neg_inf)
    masked_value = -INFINITY;
  for (index_t i = x; i < M; i += x_size) {
    val = (negate ? -in[base + i*sa] : in[base + i*sa]);
    bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i*sa_mask];
    out[base + i*sa] =
      mask_value ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                   DType(masked_value);
  }
}
// Optimized masked softmax forward for rows that are contiguous in memory
// (stride[axis] == 1).  Each CUDA block handles `rows_per_block` rows; a row
// is staged into dynamic shared memory through vectorized loads of type
// LType (data) and LTypeMask (mask).  Dynamic shared memory layout, in
// doubles, as computed by the launcher:
//   [0, size_input_shared)                      row data
//   [size_input_shared, +size_mask_shared)      row mask
//   remainder                                   per-thread reduction scratch
template<bool normalize, typename OP, bool masked_neg_inf, bool negate, typename AType,
         typename LType, typename LTypeMask, typename DType, int ndim>
__global__ void masked_softmax_stride1_kernel(const DType *in, DType *out, bool *in_mask,
                                              const index_t M, int axis, Shape<ndim> sshape,
                                              Shape<ndim> mask_shape,
                                              const double temperature,
                                              const int rows_per_block,
                                              const index_t total_rows,
                                              const size_t size_input_shared,
                                              const size_t size_mask_shared) {
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool);
  // These guards only silence division-by-zero warnings for LType/DType
  // combinations that the launcher rejects (see CHECK_LE there).
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  // Fixed: the guard previously tested `entries_per_load` instead of
  // `entries_per_load_mask`.
  const int row_length_mask = entries_per_load_mask > 0 ? M / entries_per_load_mask : 0;
  extern __shared__ double shared[];
  LType* persistent_storage = reinterpret_cast<LType*>(shared);
  // rows_per_block * M (DType), aligned to double
  LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]);
  // rows_per_block * M (bool), aligned to double
  AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]);
  // softmax_threads_per_block
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  size_t base = my_row * row_length;
  index_t pos_mask = 0;
  index_t stride = mask_shape[axis];
  // Map this data row to its (possibly broadcast) mask row.
  #pragma unroll
  for (index_t i = axis-1, j = my_row; i >=0; --i) {
    auto tmp = j / sshape[i];
    if (mask_shape[i] != 1) {
      pos_mask += (j - tmp * mask_shape[i]) * stride;
      stride *= mask_shape[i];
    }
    j = tmp;
  }
  // Stage the row data and mask into shared memory with vectorized loads.
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]);
  for (index_t i = my_id; i < row_length_mask; i += threads_per_row) {
    mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ?
                                                      in_mask_aligned[i] :
                                                      in_mask_aligned[0];
  }
  DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length);
  bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask);
  __syncthreads();
  DType smax = 0.0;
  if (normalize) {
    // Tree reduction (down to a warp) + warp reduction of the row maximum
    // over unmasked entries.
    DType my_max_value;
    red::maximum::SetInitValue(my_max_value);
    for (index_t i = my_id; i < M; i += threads_per_row) {
      if (row_mask[i])
        my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
    }
    scratch[threadIdx.x] = my_max_value;
    __syncthreads();
    for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
      if (my_id < size) {
        scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
      }
      __syncthreads();
    }
    if (my_id < warp_size) {
      AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                                 [](AType x, AType y) { return ::max(x, y); });
      scratch[threadIdx.x] = my_value;
    }
    __syncthreads();
    smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
    __syncthreads();
  }
  // Same reduction pattern for the exponential sum over unmasked entries.
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < M; i += threads_per_row) {
    if (row_mask[i]) {
      const DType val = (negate ? -row[i] : row[i]);
      my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
    }
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Masked-out entries become 0, or -inf when masked_neg_inf is set.
  double masked_value = 0.0;
  if (masked_neg_inf)
    masked_value = -INFINITY;
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = (negate ? -row[i] : row[i]);
    row[i] = row_mask[i] ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                           DType(masked_value);
  }
  __syncthreads();
  // Write the result back with vectorized stores.
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}
// Launcher for the softmax forward pass: picks the vectorized shared-memory
// kernel when rows are contiguous and fit in 20 kB, otherwise the generic
// strided kernel (one block of 128 threads per row).
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const double temperature) {
  // Reduction length along the softmax axis and number of independent rows.
  const index_t M = shape[axis];
  if (M == 0 || shape.Size() == 0) return;
  const index_t N = shape.Size() / M;
  const Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  // The optimized kernel keeps a full row in 20 kB of shared memory.
  const size_t max_opt_M = 20 * 1024 / sizeof(DType);
  const bool use_stride1 = (stride[axis] == 1) &&
                           static_cast<size_t>(M) <= max_opt_M &&
                           std::is_same<DType, OType>::value;
  if (use_stride1) {
    const int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      CHECK_LE(sizeof(DType), sizeof(LType));
      const int rows_per_block = mxnet::common::cuda::get_rows_per_block(
          M * sizeof(DType) / sizeof(LType), softmax_threads_per_block);
      const int nblocks = (N + rows_per_block - 1) / rows_per_block;
      softmax_stride1_compute_kernel<OP, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
  } else {
    constexpr int x_bits = 7;
    constexpr int x_size = 1 << x_bits;
    softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        in, out, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
  }
}
// Launcher for the masked softmax forward pass.  Chooses between the
// vectorized shared-memory kernel (contiguous rows that fit in 20 kB) and
// the generic strided kernel.  `normalize` selects whether the row maximum
// is subtracted before exponentiation.
template<typename OP, bool masked_neg_inf, bool negate,
         typename AType, typename DType, typename OType, int ndim>
inline void MaskedSoftmax(Stream<gpu> *s, DType *in, OType *out, bool *mask,
                          Shape<ndim> data_shape, Shape<ndim> mask_shape,
                          int axis, const double temperature,
                          bool normalize, const OpContext& ctx) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = data_shape[axis];
  if (M == 0 || data_shape.Size() == 0) return;
  index_t N = data_shape.Size() / M;
  Shape<ndim> stride = calc_stride(data_shape);
  Shape<ndim> sshape = data_shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using max of 20 kB of shared memory for InputData in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      CHECK_LE(sizeof(DType), sizeof(LType));
      MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, {
        CHECK_LE(sizeof(bool), sizeof(LTypeMask));
        int rows_per_block = mxnet::common::cuda::
                              get_rows_per_block(M *
                                sizeof(DType) / sizeof(LType),
                                softmax_threads_per_block);
        // calculate amount shared memory (slots aligned to double)
        // Fixed: removed accidental self-assignment in the initializer
        // (`entries_per_load = entries_per_load = ...`).
        int entries_per_load = sizeof(LType)/sizeof(DType);
        int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool);
        size_t size_input_shared = entries_per_load > 0 ?
                                   rows_per_block * M / entries_per_load : 0;
        size_t size_mask_shared = entries_per_load_mask > 0 ?
                                  rows_per_block * M / entries_per_load_mask : 0;
        // Round each region up to a whole number of doubles.
        size_input_shared = ((size_input_shared * sizeof(LType) + sizeof(double) - 1) /
                             sizeof(double));
        size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) /
                            sizeof(double));
        size_t amount_shared = size_input_shared * sizeof(double) +
                               size_mask_shared * sizeof(double) +
                               softmax_threads_per_block * sizeof(AType);
        int nblocks = (N + rows_per_block - 1) / rows_per_block;
        if (normalize) {
          masked_softmax_stride1_kernel<true, OP, masked_neg_inf, negate,
                                        AType, LType, LTypeMask>
            <<<nblocks, softmax_threads_per_block, amount_shared,
               mshadow::Stream<gpu>::GetStream(s)>>>(
              in, out, mask, M, axis, sshape, mask_shape, temperature,
              rows_per_block, N, size_input_shared, size_mask_shared);
        } else {
          masked_softmax_stride1_kernel<false, OP, masked_neg_inf, negate,
                                        AType, LType, LTypeMask>
            <<<nblocks, softmax_threads_per_block, amount_shared,
               mshadow::Stream<gpu>::GetStream(s)>>>(
              in, out, mask, M, axis, sshape, mask_shape, temperature,
              rows_per_block, N, size_input_shared, size_mask_shared);
        }
      });
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_kernel);
  } else {
    size_t amount_shared = x_size * sizeof(AType);
    if (normalize) {
      masked_softmax_kernel<true, x_bits, OP, masked_neg_inf, negate, AType, ndim>
        <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, mask, M, axis, sshape, stride, mask_shape, temperature);
    } else {
      masked_softmax_kernel<false, x_bits, OP, masked_neg_inf, negate, AType, ndim>
        <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, mask, M, axis, sshape, stride, mask_shape, temperature);
    }
    MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_kernel);
  }
}
// Optimized softmax backward for contiguous rows.  Both `out` and `ograd`
// rows are staged in 20 kB of static shared memory via vectorized LType
// loads; `igrad` is written back the same way.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                            DType *igrad, const IType *length,
                                            const index_t M,
                                            const double temperature,
                                            const int rows_per_block,
                                            const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  // Valid prefix of the row; entries past `len` receive zero gradient.
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  size_t base = my_row * row_length;
  // Stage `out` into the first half of this row's slot and `ograd` into the
  // second half.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  // Viewed as DType: row[0..M) = out, row[M..2M) = ograd.
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  __syncthreads();
  // Tree reduction (down to a warp) + warp reduction of
  // sum_i OP1(ograd_i, out_i) over the valid prefix.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Compute the gradient in place over the `out` half of the row slot; the
  // kAddTo case reads the old igrad from global memory before overwrite.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val =
      negate ?
      -OP2::Map(row[i + M], row[i], ssum) :
      OP2::Map(row[i + M], row[i], ssum);
    row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
                         DType(0.0f);
    if (Req == kAddTo) {
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  // Vectorized write-back of the finished gradient row.
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}
// Generic (strided) softmax backward: one CUDA block per output row,
// x_size threads per block, shared-memory tree reduction for the row sum.
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                    const IType *length, index_t M, int axis,
                                    Shape<ndim> sshape, Shape<ndim> stride,
                                    const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  const index_t sa = stride[axis];
  const index_t base = unravel_dot(blockIdx.x, sshape, stride);
  const index_t tid = threadIdx.x;
  // Only the first `len` entries of the row are valid.
  const index_t len = (length == nullptr) ? M : static_cast<index_t>(length[blockIdx.x]);
  // Block-wide reduction of sum_i OP1(ograd_i, out_i) over the valid prefix.
  red::sum::SetInitValue(smem[tid]);
  for (index_t idx = tid; idx < len; idx += x_size) {
    smem[tid] += OP1::Map(ograd[base + idx*sa], out[base + idx*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  const AType ssum = smem[0];
  __syncthreads();
  // Gradient per element; entries past `len` are zeroed.
  for (index_t idx = tid; idx < M; idx += x_size) {
    DType grad = negate ?
                 -OP2::Map(ograd[base + idx*sa], out[base + idx*sa], ssum) :
                 OP2::Map(ograd[base + idx*sa], out[base + idx*sa], ssum);
    grad = (idx < len) ? grad : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + idx*sa], Req, grad / static_cast<DType>(temperature));
  }
}
// Optimized masked softmax backward for contiguous rows.  out/ograd rows are
// staged in dynamic shared memory via vectorized LType loads, the mask via
// LTypeMask loads; the layout matches the launcher-computed offsets
// size_input_shared / size_mask_shared (both counted in doubles).
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename LTypeMask, typename DType, typename OType, int ndim>
__global__ void masked_softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                                   DType *igrad, const bool *in_mask,
                                                   const index_t M, int axis,
                                                   Shape<ndim> sshape,
                                                   Shape<ndim> mask_shape,
                                                   const double temperature,
                                                   const int rows_per_block,
                                                   const index_t total_rows,
                                                   const size_t size_input_shared,
                                                   const size_t size_mask_shared) {
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool);
  // These guards only silence division-by-zero warnings for combinations the
  // launcher rejects.  Fixed: the mask guard previously tested
  // `entries_per_load` instead of `entries_per_load_mask`.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const int row_length_mask = entries_per_load_mask > 0 ? M / entries_per_load_mask : 0;
  extern __shared__ double shared[];
  LType* persistent_storage = reinterpret_cast<LType*>(shared);
  // 2 * rows_per_block * M (DType), aligned to double
  LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]);
  // rows_per_block * M (bool), aligned to double
  AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]);
  // softmax_threads_per_block
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  size_t base = my_row * row_length;
  index_t pos_mask = 0;
  index_t stride = mask_shape[axis];
  // Map this data row to its (possibly broadcast) mask row.
  #pragma unroll
  for (index_t i = axis - 1, j = my_row; i >=0; --i) {
    auto tmp = j / sshape[i];
    if (mask_shape[i] != 1) {
      pos_mask += (j - tmp * mask_shape[i]) * stride;
      stride *= mask_shape[i];
    }
    j = tmp;
  }
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  // Stage out into the first half of the row slot, ograd into the second.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]);
  for (index_t i = my_id; i < row_length_mask; i += threads_per_row) {
    mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ?
                                                      in_mask_aligned[i] :
                                                      in_mask_aligned[0];
  }
  // Viewed as DType: row[0..M) = out, row[M..2M) = ograd.
  DType* row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask);
  __syncthreads();
  // Tree + warp reduction of sum_i OP1(ograd_i, out_i) over unmasked entries.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < M; i += threads_per_row) {
    if (row_mask[i])
      my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x],
                                               [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Gradient per element; masked-out entries get zero gradient.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val =
      negate ?
      -OP2::Map(row[i + M], row[i], ssum):
      OP2::Map(row[i + M], row[i], ssum);
    row[i] = row_mask[i] ? DType(val / static_cast<DType>(temperature)) :
                           DType(0.0f);
    if (Req == kAddTo) {
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  // Vectorized write-back of the finished gradient row.
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}
// Generic (strided) masked softmax backward: one CUDA block per output row.
// The mask may be broadcast along the softmax axis (bcst_mask_axis).
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType>
__global__ void masked_softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                           const bool *in_mask, index_t M, int axis,
                                           Shape<ndim> sshape, Shape<ndim> stride,
                                           Shape<ndim> mask_shape,
                                           const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];
  const index_t sa = stride[axis];
  const index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t sa_mask = 0;
  const index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask);
  const bool bcst_mask_axis = (mask_shape[axis] == 1);
  const index_t tid = threadIdx.x;
  // Block-wide reduction of sum_i OP1(ograd_i, out_i) over unmasked entries.
  red::sum::SetInitValue(smem[tid]);
  for (index_t idx = tid; idx < M; idx += x_size) {
    const bool keep = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + idx*sa_mask];
    if (keep)
      smem[tid] += OP1::Map(ograd[base + idx*sa], out[base + idx*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  const AType ssum = smem[0];
  __syncthreads();
  // Gradient per element; masked-out entries get zero gradient.
  for (index_t idx = tid; idx < M; idx += x_size) {
    const bool keep = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + idx*sa_mask];
    DType grad = negate ?
                 -OP2::Map(ograd[base + idx*sa], out[base + idx*sa], ssum) :
                 OP2::Map(ograd[base + idx*sa], out[base + idx*sa], ssum);
    grad = keep ? grad / static_cast<DType>(temperature) : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + idx*sa], Req, grad);
  }
}
// Launcher for the softmax backward pass: vectorized shared-memory kernel
// when rows are contiguous and fit, otherwise the generic strided kernel.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape, int axis,
                        const double temperature) {
  // Reduction length along the softmax axis and number of independent rows.
  const index_t M = shape[axis];
  if (M == 0 || shape.Size() == 0) return;
  const index_t N = shape.Size() / M;
  const Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  // The optimized kernel keeps both `out` and `ograd` in 20 kB of shared
  // memory, so the maximum row length is half of the forward pass.
  const size_t max_opt_M = 20 * 1024 / sizeof(DType) / 2;
  const bool use_stride1 = (stride[axis] == 1) &&
                           static_cast<size_t>(M) <= max_opt_M &&
                           std::is_same<DType, OType>::value;
  if (use_stride1) {
    const int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      CHECK_LE(sizeof(DType), sizeof(LType));
      const int rows_per_block = mxnet::common::cuda::get_rows_per_block(
          M * sizeof(DType) / sizeof(LType), softmax_threads_per_block);
      const int nblocks = (N + rows_per_block - 1) / rows_per_block;
      softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          out, ograd, igrad, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
  } else {
    constexpr int x_bits = 7;
    constexpr int x_size = 1 << x_bits;
    softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
  }
}
// Launcher for the masked softmax backward pass.  Mirrors MaskedSoftmax:
// vectorized shared-memory kernel when rows are contiguous and fit in the
// budget, otherwise the generic strided kernel.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType>
inline void MaskedSoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                              DType *igrad, bool *mask, Shape<ndim> data_shape,
                              Shape<ndim> mask_shape, int axis,
                              const double temperature,
                              const OpContext& ctx) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;
  index_t M = data_shape[axis];
  if (M == 0 || data_shape.Size() == 0) return;
  index_t N = data_shape.Size() / M;
  Shape<ndim> stride = calc_stride(data_shape);
  Shape<ndim> sshape = data_shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using max of 20 kB of shared memory for InputData in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      CHECK_LE(sizeof(DType), sizeof(LType));
      MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, {
        CHECK_LE(sizeof(bool), sizeof(LTypeMask));
        int rows_per_block = mxnet::common::cuda::
                              get_rows_per_block(M *
                                sizeof(DType) / sizeof(LType),
                                softmax_threads_per_block);
        // calculate amount shared memory (slots aligned to double)
        // Fixed: removed accidental self-assignment in the initializer
        // (`entries_per_load = entries_per_load = ...`).
        int entries_per_load = sizeof(LType)/sizeof(DType);
        int entries_per_load_mask = sizeof(LTypeMask)/sizeof(bool);
        size_t size_input_shared = entries_per_load > 0 ?
                                   rows_per_block * M / entries_per_load : 0;
        size_t size_mask_shared = entries_per_load_mask > 0 ?
                                  rows_per_block * M / entries_per_load_mask : 0;
        // Input slot holds both out and ograd (factor 2); round regions up
        // to whole doubles.
        size_input_shared = ((2 * size_input_shared * sizeof(LType) + sizeof(double) - 1) /
                             sizeof(double));
        size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) /
                            sizeof(double));
        size_t amount_shared = size_input_shared * sizeof(double) +
                               size_mask_shared * sizeof(double) +
                               softmax_threads_per_block * sizeof(AType);
        int nblocks = (N + rows_per_block - 1) / rows_per_block;
        masked_softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType, LTypeMask>
          <<<nblocks, softmax_threads_per_block, amount_shared,
             mshadow::Stream<gpu>::GetStream(s)>>>(
            out, ograd, igrad, mask, M, axis, sshape, mask_shape,
            temperature, rows_per_block, N, size_input_shared, size_mask_shared);
      });
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_grad_kernel);
  } else {
    masked_softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, mask, M, axis, sshape, stride, mask_shape, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_grad_kernel);
  }
}
#endif
} // namespace mxnet_op
// Operator parameters for the softmax family of operators.
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;                            // axis to compute softmax along (default -1)
  dmlc::optional<double> temperature;  // optional softmax temperature
  dmlc::optional<int> dtype;           // optional explicit output dtype
  dmlc::optional<bool> use_length;     // treat the second input as a length mask
  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }
  // Field-wise structural equality over all four parameters.
  bool operator==(const SoftmaxParam& other) const {
    return this->axis == other.axis &&
           this->temperature == other.temperature &&
           this->dtype == other.dtype &&
           this->use_length == other.use_length;
  }
  // Export all fields as strings into an attribute dictionary; a set dtype
  // is rendered through MXNetTypeWithBool2String instead of its raw int.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s, temperature_s, dtype_s, use_length_s;
    axis_s << axis;
    temperature_s << temperature;
    dtype_s << dtype;
    use_length_s << use_length;
    (*dict)["axis"] = axis_s.str();
    (*dict)["temperature"] = temperature_s.str();
    if (dtype.has_value()) {
      (*dict)["dtype"] = MXNetTypeWithBool2String(dtype.value());
    } else {
      (*dict)["dtype"] = dtype_s.str();
    }
    (*dict)["use_length"] = use_length_s.str();
  }
};
// Operator parameters for masked softmax.
struct MaskedSoftmaxParam : public dmlc::Parameter<MaskedSoftmaxParam> {
  int axis;                            // axis to compute softmax along (default -1)
  dmlc::optional<double> temperature;  // optional softmax temperature
  // NOTE(review): `dtype` is declared but not registered with
  // DMLC_DECLARE_FIELD below — presumably unused; verify before relying on it.
  dmlc::optional<int> dtype;
  dmlc::optional<bool> normalize;      // subtract per-row max before exp
  DMLC_DECLARE_PARAMETER(MaskedSoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(normalize)
    .set_default(dmlc::optional<bool>(true))
    .describe("Whether to normalize input data x: x = x - max(x)");
  }
};
// True when the operator carries an explicit (valid) output dtype override.
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  if (!param.dtype.has_value()) {
    return false;
  }
  return param.dtype.value() != -1;
}
// Whether the operator uses its length input as a mask over the data.
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  return nnvm::get<SoftmaxParam>(attrs.parsed).use_length.value();
}
// Infer input/output dtypes for softmax forward.
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (!softmax_has_dtype_override(attrs)) {
    // No explicit dtype: the output mirrors the data input's dtype.
    std::vector<int> data_type = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &data_type, out_attrs);
  }
  // Explicit dtype: force it onto the output, then propagate the resolved
  // output dtype back to the data input via type_assign.
  TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
  type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
  return true;
}
// Infer shapes for softmax forward.  With use_length, the length input's
// shape is the data shape with the softmax axis removed.
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    const mxnet::TShape& dshape = in_attrs->at(0);
    const int real_axis = (param.axis != -1) ? param.axis : dshape.ndim() - 1;
    // A 1-D data shape still yields a 1-D (length-1) length shape.
    mxnet::TShape len_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int out_dim = 0;
    for (int d = 0; d < dshape.ndim(); ++d) {
      if (d == real_axis) continue;
      len_shape[out_dim++] = dshape[d];
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, len_shape);
  }
  // Output shape mirrors the data input.
  mxnet::ShapeVector data_only = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &data_only, out_attrs);
}
// Infer shapes for softmax backward.  Inputs are {ograd, output} in the
// simple case and {ograd, data[, length], output} otherwise
// (see SoftmaxGradOpInputNames).
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // ograd (0), data (1) and output (3) share one shape; infer it jointly
      // with the data gradient, then write results back to the real vectors.
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      // The length input (2) and the length gradient (output 1) share theirs.
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}
// Infer dtypes for softmax backward.  Input layout matches
// SoftmaxGradOpInputNames: {ograd, output} or {ograd, data[, length], output}.
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    int in_dtype = (*in_attrs)[1];   // dtype of the forward data input
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];  // forward output dtype
    // ograd must match the forward output; the data gradient matches the data.
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
    if (softmax_use_length(attrs)) {
      // The length gradient takes the length input's dtype.
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
    }
    // Successful only when every participating dtype is resolved (!= -1).
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1));
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    // Simple case: ograd, output and the gradient all share one dtype.
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}
// In-place sharing options (input index -> output index) for softmax
// backward; the candidate set depends on which inputs are present.
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  const bool extended = softmax_has_dtype_override(attrs) || softmax_use_length(attrs);
  if (!extended) {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
  }
  if (softmax_use_length(attrs)) {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
  }
  return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
}
// Number of inputs to softmax backward: 2 (ograd, output) in the simple
// case, 3 with a dtype override, 4 when a length input is used as well.
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  if (!softmax_has_dtype_override(attrs) && !softmax_use_length(attrs)) {
    return 2;
  }
  return softmax_use_length(attrs) ? 4 : 3;
}
// Names of the backward inputs; the set mirrors SoftmaxGradOpNumInputs.
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  if (!softmax_has_dtype_override(attrs) && !softmax_use_length(attrs)) {
    return std::vector<std::string>{"ograd", "output"};
  }
  if (softmax_use_length(attrs)) {
    return std::vector<std::string>{"ograd", "data", "length", "output"};
  }
  return std::vector<std::string>{"ograd", "data", "output"};
}
// Gradient-node builder for softmax ops.  Picks the gradient pattern based
// on whether the backward pass needs the forward inputs as well as outputs.
struct SoftmaxFGradient {
  const char *op_name;
  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    const bool needs_inputs =
        softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs);
    if (needs_inputs) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    }
    return ElemwiseGradUseOut {op_name}(n, ograds);
  }
};
// Infer dtypes for masked softmax forward: the output follows the data
// input (index 0); the mask input's dtype is not propagated here.
static inline bool MaskedSoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                       std::vector<int>* in_attrs,
                                       std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  CHECK_EQ(in_attrs->size(), 2U);
  std::vector<int> data_type = {in_attrs->at(0)};
  return ElemwiseType<1, 1>(attrs, &data_type, out_attrs);
}
// Infer shapes for masked softmax forward: data is input 0, mask is input 1.
// The mask must have the same rank as the data and every mask dimension must
// either match the data dimension or be a broadcastable 1.
static inline bool MaskedSoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                        mxnet::ShapeVector *in_shape,
                                        mxnet::ShapeVector *out_shape) {
  CHECK_EQ(out_shape->size(), 1U);
  CHECK_EQ(in_shape->size(), 2U);
  mxnet::TShape& data_shape = (*in_shape)[0];
  mxnet::TShape& mask_shape = (*in_shape)[1];
  // Nothing can be inferred until both ranks are known.
  if (!mxnet::ndim_is_known(data_shape) || !mxnet::ndim_is_known(mask_shape)) {
    return false;
  }
  CHECK(data_shape.ndim() == mask_shape.ndim())
    << "Number of dimensions in data and mask does not match";
  CHECK(data_shape.ndim() > 0)
    << "Empty tuple is not allowed";
  for (int i = 0; i < data_shape.ndim(); ++i) {
    CHECK(data_shape[i] == mask_shape[i] || mask_shape[i] == 1)
      << "Mask cannot be broadcasted from " << mask_shape << " to " << data_shape;
  }
  // The output shares the data shape; propagate in both directions.
  SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0));
  SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0));
  return true;
}
// Infer shapes for masked softmax backward: ograd is input 0, mask is
// input 1, input 2 shares the ograd/output shape.  Mask broadcasting rules
// match MaskedSoftmaxOpShape.
static inline bool MaskedSoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                            mxnet::ShapeVector *in_shape,
                                            mxnet::ShapeVector *out_shape) {
  CHECK_EQ(out_shape->size(), 1U);
  CHECK_EQ(in_shape->size(), 3U);
  mxnet::TShape& ograd_shape = (*in_shape)[0];
  mxnet::TShape& mask_shape = (*in_shape)[1];
  // Nothing can be inferred until both ranks are known.
  if (!mxnet::ndim_is_known(ograd_shape) || !mxnet::ndim_is_known(mask_shape)) {
    return false;
  }
  CHECK(ograd_shape.ndim() == mask_shape.ndim())
    << "Number of dimensions in data and mask does not match";
  CHECK(ograd_shape.ndim() > 0)
    << "Empty tuple is not allowed";
  for (int i = 0; i < ograd_shape.ndim(); ++i) {
    CHECK(ograd_shape[i] == mask_shape[i] || mask_shape[i] == 1)
      << "Mask cannot be broadcasted from " << mask_shape << " to " << ograd_shape;
  }
  // Inputs 0 and 2 and the gradient output all share one shape; propagate it
  // in both directions (order of these assignments is preserved).
  SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0));
  SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(2));
  SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0));
  SHAPE_ASSIGN_CHECK(*in_shape, 2, out_shape->at(0));
  return true;
}
// Infer dtypes for masked softmax backward: input 0, input 2 and the
// gradient output share one dtype; the resolved output dtype is then fed
// back to input 0.
static inline bool MaskedSoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                           std::vector<int>* in_attrs,
                                           std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->size(), 3U);
  const int ograd_dtype = (*in_attrs)[0];
  TYPE_ASSIGN_CHECK(*in_attrs, 2, ograd_dtype);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, ograd_dtype);
  TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
  return true;
}
// In-place sharing options (input index -> output index) for masked softmax
// backward.  NOTE(review): the pairs mirror SoftmaxGradOpInplaceOption's
// use_length case — verify the indices against the operator registration.
static inline std::vector<std::pair<int, int> >
MaskedSoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  std::vector<std::pair<int, int> > options;
  options.reserve(4);
  options.emplace_back(0, 0);
  options.emplace_back(1, 0);
  options.emplace_back(2, 1);
  options.emplace_back(3, 0);
  return options;
}
// Forward entry point for softmax-family operators on device xpu.
// inputs[0] = data; inputs[1] = length mask (only when use_length=True);
// outputs[0] = result.  kAddTo is not supported.
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp || inputs[0].Size() == 0U) return;
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
    param.temperature.value() : 1.0;
  // Collapse the shape to 2-D or 3-D while keeping the softmax axis intact
  // (only shape.get<2>() / shape.get<3>() are used below).
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  // Safe accumulation uses the promoted accumulation type AType for the
  // reduction; recommended for float16 inputs.
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      // Default to int32 for the (unused) length type when no mask is given.
      int type = kInt32;
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        // Dispatch on accumulation type (AType vs DType) and on the
        // compacted rank (2-D vs 3-D).
        if (safe_acc) {
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        } else {
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}
// Forward masked softmax: inputs are {data, boolean mask}.  masked_neg_inf
// controls how masked-out positions are treated by the kernel (see
// MaskedSoftmax); negate selects the softmin-style variants.  The mask is a
// bool tensor broadcast-compatible with the data (checked by the shape
// inference earlier in this file).
template<typename xpu, typename OP, bool masked_neg_inf, bool negate = false>
void MaskedSoftmaxCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
// nothing to do for null requests or empty tensors
if (req[0] == kNullOp || inputs[0].Size() == 0U) return;
// accumulation into the output is not supported
CHECK_NE(req[0], kAddTo);
const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
// MXNET_SAFE_ACCUMULATION selects a wider accumulation type (AType)
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for masked_softmax with "
"float16 inputs. "
"See https://mxnet.apache.org/api/faq/env_var "
"for more details.");
}
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, {
bool* mask_ptr = inputs[1].dptr<bool>();
if (safe_acc) {
MaskedSoftmax<OP, masked_neg_inf, negate, AType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<DType>(), mask_ptr,
inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(),
axis, temperature, param.normalize.value(), ctx);
} else {
MaskedSoftmax<OP, masked_neg_inf, negate, DType>(
ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
outputs[0].dptr<DType>(), mask_ptr,
inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(),
axis, temperature, param.normalize.value(), ctx);
}
});
});
}
// Backward softmax-family kernel.  Input layout depends on the attrs:
// inputs[0] is ograd, the forward output sits at index out_idx (2 when a
// dtype override adds an extra input, 3 when use_length does, else 1), and
// inputs[2] is the length tensor when use_length=True.  OP1/OP2 encode the
// gradient formula for the particular softmax variant.
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
// the length input is non-differentiable: zero its gradient if requested
if (softmax_use_length(attrs)) {
MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
if (req[1] != kNullOp) {
mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
}
});
}
if (req[0] == kNullOp) return;
const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
// collapse dims around `axis` so the kernel only sees a 2-D or 3-D view
mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
// locate the forward output among the inputs (position varies with attrs)
int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
out_idx = softmax_use_length(attrs) ? 3 : out_idx;
// MXNET_SAFE_ACCUMULATION selects a wider accumulation type (AType)
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
IType * length_ptr = nullptr;
if (softmax_use_length(attrs)) {
length_ptr = inputs[2].dptr<IType>();
}
if (safe_acc) {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
} else {
if (shape.ndim() == 2) {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<2>(), axis,
static_cast<DType>(temperature));
} else {
SoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
length_ptr, shape.get<3>(), axis,
static_cast<DType>(temperature));
}
}
});
});
});
});
}
// Backward masked softmax.  Inputs are {ograd, mask, forward output};
// output 0 is the data gradient.  The bool mask is applied with the same
// broadcast semantics as the forward pass; no gradient is produced for the
// mask itself.
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void MaskedSoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
const double temperature = param.temperature.has_value() ?
param.temperature.value() : 1.0;
// MXNET_SAFE_ACCUMULATION selects a wider accumulation type (AType)
bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true);
MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, {
DType* ograd_ptr = inputs[0].dptr<DType>();
DType* out_ptr = inputs[2].dptr<DType>();
bool* mask_ptr = inputs[1].dptr<bool>();
DType* grad_data = outputs[0].dptr<DType>();
if (safe_acc) {
MaskedSoftmaxGrad<OP1, OP2, Req, negate, AType>(
ctx.get_stream<xpu>(), out_ptr,
ograd_ptr, grad_data, mask_ptr,
inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(),
axis, static_cast<DType>(temperature), ctx);
} else {
MaskedSoftmaxGrad<OP1, OP2, Req, negate, DType>(
ctx.get_stream<xpu>(), out_ptr,
ograd_ptr, grad_data, mask_ptr,
inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(),
axis, static_cast<DType>(temperature), ctx);
}
});
});
});
}
} // namespace op
} // namespace mxnet
namespace std {
// Hash specialization so SoftmaxParam can key unordered containers
// (e.g. op caches).  Combines all fields that distinguish one softmax
// configuration from another.
template<>
struct hash<mxnet::op::SoftmaxParam> {
  // FIX: operator() must be const — the standard hasher requirements call
  // it on a const hash object, and the original non-const overload fails
  // to compile in that context.
  size_t operator()(const mxnet::op::SoftmaxParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    ret = dmlc::HashCombine(ret, val.temperature);
    ret = dmlc::HashCombine(ret, val.dtype);
    ret = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
GB_binop__rdiv_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint64)
// A*D function (colscale): GB (_AxD__rdiv_uint64)
// D*A function (rowscale): GB (_DxB__rdiv_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint64)
// C=scalar+B GB (_bind1st__rdiv_uint64)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint64)
// C=A+scalar GB (_bind2nd__rdiv_uint64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT64 || GxB_NO_RDIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with C, A, B all dense, specialized for RDIV on uint64_t.
// The loop body lives in the included template; this wrapper only binds
// the GB_* type/operator macros defined above.
void GB (_Cdense_ewise3_accum__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense, specialized for RDIV on uint64_t;
// the actual loop is in the included template.
void GB (_Cdense_ewise3_noaccum__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, RDIV/uint64_t.
// Returns GrB_NO_VALUE when this specialization is compiled out
// (GB_DISABLE), letting the caller fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (uint64_t) into a dense matrix, RDIV op.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE) so the caller can
// fall back to the generic kernel.
GrB_Info GB (_Cdense_accumb__rdiv_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// FIX: the generated code returned GrB_SUCCESS inside the block above and
// again here, leaving the second return unreachable; a single exit remains.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, RDIV/uint64_t.
GrB_Info GB (_AxD__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, RDIV/uint64_t.
GrB_Info GB (_DxB__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked, C<M> or C<!M>), RDIV/uint64_t.
// When is_eWiseUnion is true, entries present in only one input are
// combined with the supplied alpha/beta scalars instead of copied.
GrB_Info GB (_AaddB__rdiv_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
// alpha/beta are only read when is_eWiseUnion; left uninitialized otherwise
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) with C sparse/hyper,
// RDIV/uint64_t; the loop lives in the included meta template.
GrB_Info GB (_AemultB_08__rdiv_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, RDIV/uint64_t.  GB_BINOP_FLIP is 0 for rdiv (the flip was
// already resolved by rewriting div(y,x) as rdiv(x,y)), so only the
// unflipped template instantiation below is compiled.
GrB_Info GB (_AemultB_02__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and A, B
// bitmap/full, RDIV/uint64_t.
GrB_Info GB (_AemultB_04__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B (optionally masked) where C is
// bitmap, RDIV/uint64_t.
GrB_Info GB (_AemultB_bitmap__rdiv_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply rdiv with the scalar bound as first argument:
// Cx[p] = rdiv(x, Bx[p]) = Bx[p] / x (unsigned 64-bit; GB_IDIV_UNSIGNED
// presumably implements GraphBLAS divide-by-zero semantics — see GB.h).
GrB_Info GB (_bind1st__rdiv_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply rdiv with the scalar bound as second argument:
// Cx[p] = rdiv(Ax[p], y) = y / Ax[p] (unsigned 64-bit; GB_IDIV_UNSIGNED
// presumably implements GraphBLAS divide-by-zero semantics — see GB.h).
GrB_Info GB (_bind2nd__rdiv_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 64) ; \
}
// C = op(x, A'): transpose A and apply rdiv with x bound first, using the
// GB_CAST_OP macro defined just above.  GB_ATYPE is temporarily rebound
// because the transpose template treats A as the second operand.
GrB_Info GB (_bind1st_tran__rdiv_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent code in this translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 64) ; \
}
// C = op(A', y): transpose A and apply rdiv with y bound second, using the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__rdiv_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rowsums.c | // Row-sum benchmark: sums the rows of a column-major matrix with OpenMP
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timing.h"
// Row-sum benchmark: f1 is an nrows x ncols matrix stored column-major.
// Each OpenMP thread accumulates partial row sums into a private buffer
// which is then reduced into rsum under a critical section.  The kernel is
// repeated (doubling `iter`) until at least `mintime` seconds elapse, then
// timed and verified against a serial reference.
int main(int argc, char** argv) {
    double wct_start, wct_end, cput_start, cput_end, runtime;
    int iter, nrows, ncols, i, j, k, size;
    double *f1, *rsum, *rsumtrue;   // removed unused f2, r, n
    iter = 50;
    double mintime = 4.0;           // minimum measurement window in seconds
    if (argc != 3 && argc != 4) {
        printf("Usage: %s <nrows> <ncols> [mintime]\n", argv[0]);
        exit(1);
    }
    if (argc == 4) {
        mintime = atof(argv[3]);
    }
    nrows = atoi(argv[1]);
    ncols = atoi(argv[2]);
    size = nrows * ncols;
    f1 = malloc((size_t)size * sizeof(double));
    rsum = malloc((size_t)nrows * sizeof(double));
    rsumtrue = malloc((size_t)nrows * sizeof(double));
    if (f1 == NULL || rsum == NULL || rsumtrue == NULL) {
        // BUG FIX: allocations were previously unchecked
        fprintf(stderr, "out of memory\n");
        exit(1);
    }
    // deterministic fill; each element depends only on its own index
    #pragma omp parallel for schedule(static)
    for (i = 0; i < size; i++) {
        f1[i] = sin((double) i * i);
    }
    // serial reference row sums for verification
    for (i = 0; i < nrows; i++) {
        rsumtrue[i] = 0.0;
    }
    for (i = 0; i < ncols; i++) {
        for (j = 0; j < nrows; j++) {
            rsumtrue[j] += f1[i*nrows + j];
        }
    }
    // per-thread partial-sum buffer; made private in the parallel region
    double my_rsum[nrows];
    while (1) {
        timing(&wct_start, &cput_start);
        for (j = 0; j < iter; j++) {
            for (i = 0; i < nrows; i++) {
                rsum[i] = 0.0;
            }
            // BUG FIX: k was shared in the original, so concurrent writes
            // to it in the omp for loop raced and corrupted the indexing.
            #pragma omp parallel private(my_rsum, i, j, k) shared(rsum)
            {
                for (i = 0; i < nrows; i++) {
                    my_rsum[i] = 0.0;
                }
                #pragma omp for schedule(static)
                for (i = 0; i < ncols; i++) {
                    k = i * nrows;
                    for (j = 0; j < nrows; j++, k++) {
                        my_rsum[j] += f1[k];
                    }
                }
                // reduce private partial sums into the shared result
                #pragma omp critical
                for (i = 0; i < nrows; i++) {
                    rsum[i] += my_rsum[i];
                }
            } // end of #pragma omp parallel
        }
        timing(&wct_end, &cput_end);
        // making sure mintime was spent, otherwise restart with 2*iter
        if (wct_end - wct_start > mintime) {
            break;
        }
        iter = iter * 2;
    }
    runtime = wct_end - wct_start;
    printf("size:\t%d\ttime/iter:\t%lf\tGFLOP/s:\t%lf\n", size, runtime/iter,
           ((double)iter) * size * 1e-9 / runtime);
    // BUG FIX: the original used integer abs() on a double difference,
    // which truncated toward zero and silently passed wrong results.
    // NOTE(review): a fixed 1e-5 tolerance may be too tight for very large
    // ncols due to summation-order rounding — confirm against use cases.
    for (i = 0; i < nrows; i++) {
        if (fabs(rsum[i] - rsumtrue[i]) > 1e-5) {
            printf("Problem in %d-th row value\n", i);
            exit(1);
        }
    }
    free(f1);
    free(rsum);
    free(rsumtrue);
    return 0;
}
|
GB_binop__band_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__band_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__band_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__band_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__band_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__band_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int32)
// C=scalar+B GB (_bind1st__band_int32)
// C=scalar+B' GB (_bind1st_tran__band_int32)
// C=A+scalar GB (_bind2nd__band_int32)
// C=A'+scalar GB (_bind2nd_tran__band_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT32 || GxB_NO_BAND_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense, specialized for BAND (bitwise AND) on
// int32_t; the loop lives in the included template.
void GB (_Cdense_ewise3_noaccum__band_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, BAND/int32_t.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE) so the caller can
// fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__band_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (int32_t) into a dense matrix, BAND op.
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE) so the caller can
// fall back to the generic kernel.
GrB_Info GB (_Cdense_accumb__band_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// FIX: the generated code returned GrB_SUCCESS inside the block above and
// again here, leaving the second return unreachable; a single exit remains.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked, C<M> or C<!M>), BAND/int32_t.
// When is_eWiseUnion is true, entries present in only one input are
// combined with the supplied alpha/beta scalars instead of copied.
GrB_Info GB (_AaddB__band_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
// alpha/beta are only read when is_eWiseUnion; left uninitialized otherwise
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) with C sparse/hyper,
// BAND/int32_t; the loop lives in the included meta template.
GrB_Info GB (_AemultB_08__band_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, BAND/int32_t.  GB_BINOP_FLIP is 0 (bitwise AND is
// commutative), so only the unflipped template instantiation is compiled.
GrB_Info GB (_AemultB_02__band_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Iteration is driven by the sparse/hyper mask M; A and B are accessed
// by position (bitmap/full), so no pattern intersection is needed.
GrB_Info GB (_AemultB_04__band_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-result variant: C's bitmap is filled in place by the template.
GrB_Info GB (_AemultB_bitmap__band_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For each present entry bij of B, compute Cx[p] = x & bij, with the scalar
// x bound as the first operand.  Cx and Bx may be the same array.
GrB_Info GB (_bind1st__band_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t xval = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t pB ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pB = 0 ; pB < bnz ; pB++)
    {
        // only entries present in B's bitmap (GBB) contribute
        if (GBB (Bb, pB))
        {
            int32_t bval = GBX (Bx, pB, false) ;
            Cx [pB] = (xval) & (bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// For each present entry aij of A, compute Cx[p] = aij & y, with the scalar
// y bound as the second operand.  Cx and Ax may be the same array.
GrB_Info GB (_bind2nd__band_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t yval = (*((int32_t *) y_input)) ;
    int64_t pA ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // only entries present in A's bitmap (GBB) contribute
        if (GBB (Ab, pA))
        {
            int32_t aval = GBX (Ax, pA, false) ;
            Cx [pA] = (aval) & (yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to produce each entry of C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int32_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x) & (aij) ;                 \
}

GrB_Info GB (_bind1st_tran__band_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any kernels that follow in this generated file
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to produce each entry of C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int32_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij) & (y) ;                 \
}

GrB_Info GB (_bind2nd_tran__band_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bert_layer_mb1_dynamic_tokens.h | #ifndef BERT_LAYER_H_
#define BERT_LAYER_H_
#include <new>
#include <string>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <limits>
#include <mkl.h>
#include <omp.h>
#include <iostream>
#include <immintrin.h>
#include "my_types.h"
//#include "timer.h"
// One BERT transformer encoder layer (12-head self-attention + feed-forward),
// hard-coded for 12 heads of size 64 (hiddenSize = 768), micro-batch 1, and a
// token count that may vary per call (inputTokenSize <= maxTokenSize).
// Usage: construct, setWeights() once, then forward() per input.
class BertLayer
{
public:
    // intermediateSize 3072: feed-forward/filter size, typically 4*hiddenSize
    BertLayer(int layerIdx, int maxTokenSize = 128, int hiddenSize = 768, int intermediateSize = 3072) {
        this->layerIdx = layerIdx;
        this->maxTokenSize = maxTokenSize;
        this->hiddenSize = hiddenSize;
        this->intermediateSize = intermediateSize;
        qkvMatMul.Resize(maxTokenSize, hiddenSize*3);
        resultBuffer1.Resize(maxTokenSize, hiddenSize);
        resultBuffer2.Resize(maxTokenSize, hiddenSize);
        intermediateBuffer.Resize(maxTokenSize, intermediateSize);
        // Per-head scratch: one maxTokenSize x maxTokenSize score matrix and
        // one row-sized exp buffer per attention head (12 heads assumed).
        for (int i = 0; i < 12; ++i) {
            qk_result[i] = (float *)aligned_alloc(64, sizeof(float) * maxTokenSize * maxTokenSize);
            exp_buffer[i] = (float *)aligned_alloc(64, sizeof(float) * maxTokenSize);
        }
        magic_value = (float *)aligned_alloc(64, sizeof(float) * maxTokenSize);
        // Warn if MKL and OpenMP disagree on thread count (oversubscription risk).
        int mkl_num_threads = mkl_get_max_threads();
        #pragma omp parallel
        {
            int tid = omp_get_thread_num();
            if (tid == 0) { num_threads = omp_get_num_threads(); }
        }
        if (mkl_num_threads != num_threads) {
            printf("WARNING: mkl_num_threads=%d, omp_num_threads=%d\n", mkl_num_threads, num_threads);
        }
#ifndef __INTEL_COMPILER
        // Non-ICC path uses MKL's vsErf and needs a per-thread scratch row.
        erf_buffer = new float * [num_threads];
        for (int i = 0; i < num_threads; ++i) {
            erf_buffer[i] = (float *)aligned_alloc(64, sizeof(float) * intermediateSize);
        }
#endif
    }

    virtual ~BertLayer() {
        for (int i = 0; i < 12; ++i) {
            free(qk_result[i]);
            free(exp_buffer[i]);
            qk_result[i] = NULL;
            exp_buffer[i] = NULL;
        }
        free(magic_value);
        magic_value = NULL;
#ifndef __INTEL_COMPILER
        for (int i = 0; i < num_threads; ++i) {
            free(erf_buffer[i]);
        }
        delete[] erf_buffer;
        erf_buffer = NULL;
#endif
    }

    // Copies all layer parameters; Q/K/V weights and biases are merged into
    // single buffers so the three projections run as one GEMM in forward().
    void setWeights(const float *_queryWeight, const float *_queryBias,
                    const float *_keyWeight, const float *_keyBias,
                    const float *_valueWeight, const float *_valueBias,
                    const float *_attentionOutputWeight, const float *_attentionOutputBias,
                    const float *_gamma1, const float *_beta1,
                    const float *_intermediateWeight, const float *_intermediateBias,
                    const float *_outputWeight, const float *_outputBias,
                    const float *_gamma2, const float *_beta2) {
        // Merged weights, dimension is like: 768*(768*3)
        hpj::Matrix<float> tmp;
        tmp.Resize(hiddenSize, hiddenSize * 3);
        copyWeights(tmp, 0, hiddenSize, _queryWeight);
        copyWeights(tmp, hiddenSize, hiddenSize*2, _keyWeight);
        copyWeights(tmp, hiddenSize*2, hiddenSize*3, _valueWeight);
        // Stored transposed: sgemm() detects the mismatch in dimensions and
        // runs with CblasTrans (see sgemm below).
        copyTransposed(qkvWeight, tmp);
        /*
        qkvWeight.Resize(hiddenSize, hiddenSize * 3);
        copyWeights(qkvWeight, 0, hiddenSize, _queryWeight);
        copyWeights(qkvWeight, hiddenSize, hiddenSize*2, _keyWeight);
        copyWeights(qkvWeight, hiddenSize*2, hiddenSize*3, _valueWeight);
        */
        // Merged bias
        qkvBias.Resize(hiddenSize * 3);
        memcpy(qkvBias.Data(), _queryBias, sizeof(float) * hiddenSize);
        memcpy(qkvBias.Data() + hiddenSize, _keyBias, sizeof(float) * hiddenSize);
        memcpy(qkvBias.Data() + hiddenSize*2, _valueBias, sizeof(float) * hiddenSize);
        // Weights for attention output
        attentionOutputWeight.Resize(hiddenSize, hiddenSize);
        copyWeights(attentionOutputWeight, _attentionOutputWeight);
        attentionOutputBias.Resize(hiddenSize);
        memcpy(attentionOutputBias.Data(), _attentionOutputBias, sizeof(float) * hiddenSize);
        // gamma and beta for batchnorm after self attention
        gamma1.Resize(hiddenSize);
        beta1.Resize(hiddenSize);
        memcpy(gamma1.Data(), _gamma1, sizeof(float) * hiddenSize);
        memcpy(beta1.Data(), _beta1, sizeof(float) * hiddenSize);
        // intermediate weight and bias
        intermediateWeight.Resize(hiddenSize, intermediateSize);
        copyWeights(intermediateWeight, _intermediateWeight);
        intermediateBias.Resize(intermediateSize);
        memcpy(intermediateBias.Data(), _intermediateBias, sizeof(float) * intermediateSize);
        // output dense weight and bias
        outputWeight.Resize(intermediateSize, hiddenSize);
        copyWeights(outputWeight, _outputWeight);
        outputBias.Resize(hiddenSize);
        memcpy(outputBias.Data(), _outputBias, sizeof(float) * hiddenSize);
        // gamma and beta for the last batchnorm
        gamma2.Resize(hiddenSize);
        beta2.Resize(hiddenSize);
        memcpy(gamma2.Data(), _gamma2, sizeof(float) * hiddenSize);
        memcpy(beta2.Data(), _beta2, sizeof(float) * hiddenSize);
    }

    // Do the forward computing for the whole BERT layer
    // input: inputTokenSize x hidden_size
    // actualTokens: #tokens = inputTokenSize - padded_tokens
    // Returns a reference to an internal buffer (valid until the next call).
    hpj::Matrix<float> &forward(hpj::Matrix<float> &inputBuffer, int inputTokens, int actualTokens) {
        this->inputTokenSize = inputTokens;
        // Query, Key, Value computed together
        sgemm(inputBuffer, qkvWeight, qkvMatMul);
        biasAdd(qkvMatMul, qkvBias);
        //dumpMatrix(qkvMatMul);
        // BatchMatMul: Q*K^T per head
        hpj::Matrix<float> query(qkvMatMul, 0, inputTokenSize, 0, hiddenSize);
        hpj::Matrix<float> key(qkvMatMul, 0, inputTokenSize, hiddenSize, hiddenSize);
        hpj::Matrix<float> value(qkvMatMul, 0, inputTokenSize, hiddenSize*2, hiddenSize);
        batchMatMul(query, key, qk_result);
        //printf("qk_result[0]=%f,%f\n", qk_result[0][0], qk_result[0][1]);
        // Softmax (scaling by 1/sqrt(64) and padding mask applied inside)
        computeSoftmax(actualTokens);
#ifdef DEBUG
        printf("bert/encoder/layer_%d/attention/self/Softmax:\n", layerIdx);
        printf("%f, %f, ...\n", qk_result[0][0], qk_result[0][1]);
        printf("%f, %f, ...\n", qk_result[1][0], qk_result[1][1]);
#endif
        // BatchMatMul: softmax(QK^T) * V per head
        batchMatMul(qk_result, value, resultBuffer1);
#ifdef DEBUG
        printf("bert/encoder/layer_%d/attention/self/Reshape_3:\n", layerIdx);
        dumpMatrix(resultBuffer1);
#endif
        // dense + residual connection
        denseWithSum(resultBuffer1, attentionOutputWeight, attentionOutputBias, inputBuffer, resultBuffer2);
#ifdef DEBUG
        printf("bert/encoder/layer_%d/attention/output/add:\n", layerIdx);
        dumpMatrix(resultBuffer2);
#endif
        // layer norm after attention
        batchnorm(resultBuffer2, gamma1, beta1);
#ifdef DEBUG
        printf("bert/encoder/layer_%d/attention/output/LayerNorm/batchnorm/add_1:\n", layerIdx);
        dumpMatrix(resultBuffer2);
#endif
        // intermediate (feed-forward expansion with GELU)
        intermediate(resultBuffer2, intermediateBuffer);
#ifdef DEBUG
        printf("intermediate(bert/encoder/layer_%d/intermediate/dense/mul_1):\n", layerIdx);
        dumpMatrix(intermediateBuffer);
#endif
        // dense in output + residual connection
        denseWithSum(intermediateBuffer, outputWeight, outputBias, resultBuffer2, resultBuffer1);
#ifdef DEBUG
        printf("bert/encoder/layer_%d/output/add:\n", layerIdx);
        dumpMatrix(resultBuffer1);
#endif
        // final layer norm
        batchnorm(resultBuffer1, gamma2, beta2);
#ifdef DEBUG
        printf("bert/encoder/layer_%d/output/LayerNorm/batchnorm/add_1:\n", layerIdx);
        dumpMatrix(resultBuffer1);
#endif
        return resultBuffer1;
    }

private:
    // Copy data row-major into columns [start_col, end_col) of w.
    void copyWeights(hpj::Matrix<float> &w, int start_col, int end_col, const float *data) {
        hpj::Matrix<float> subW(w, 0, w.Rows(), start_col, end_col - start_col);
        copyWeights(subW, data);
    }

    // Copy data row-major into the whole of w.
    void copyWeights(hpj::Matrix<float> &w, const float *data) {
        for (int i = 0; i < w.Rows(); ++i) {
            for (int j = 0; j < w.Cols(); ++j) {
                w(i, j) = *data++;
            }
        }
    }

    // dst = src^T (dst is resized to match).
    void copyTransposed(hpj::Matrix<float> &dst, hpj::Matrix<float> &src) {
        dst.Resize(src.Cols(), src.Rows());
        for (int i = 0; i < dst.Rows(); ++i) {
            for (int j = 0; j < dst.Cols(); ++j) {
                dst(i, j) = src(j, i);
            }
        }
    }

    // Debug helper: print a matrix, abbreviating wide rows.
    void dumpMatrix(hpj::Matrix<float> &m) {
        int cols = m.Cols();
        for (int i = 0; i < m.Rows(); ++i) {
            if (m.Cols() < 10) {
                for (int j = 0; j < m.Cols(); ++j) {
                    std::cout << m(i, j) << " ";
                }
            } else {
                std::cout << m(i, 0) << " " << m(i, 1) << " " << m(i, 2) << " ... " << m(i, cols-3) << " " << m(i, cols-2) << " " << m(i, cols-1);
            }
            std::cout << std::endl;
        }
    }

    // C = A * B
    // bTranspose: B need to be transposed or not (inferred from dimensions)
    void sgemm(hpj::Matrix<float> &A, hpj::Matrix<float> &B, hpj::Matrix<float> &C) {
        bool bTranspose = (A.Cols() != B.Rows());
        int m = inputTokenSize; //As the input token size may be less than max token size, not use A.Rows() any more
        int k = A.Cols();
        int n = (bTranspose ? B.Rows() : B.Cols());
        float alpha = 1;
        float beta = 0;
        cblas_sgemm(CblasRowMajor, CblasNoTrans, (bTranspose ? CblasTrans : CblasNoTrans),
                    m, n, k, alpha,
                    A.Data(), A.Stride(),
                    B.Data(), B.Stride(), beta,
                    C.Data(), C.Stride());
    }

    // result = x * weight + bias + input   (dense layer fused with residual add)
    void denseWithSum(hpj::Matrix<float> &x, hpj::Matrix<float> &weight, hpj::Vector<float> &bias, hpj::Matrix<float> &input, hpj::Matrix<float> &result) {
        //assert(input.Rows() == result.Rows());
        assert(input.Cols() == result.Cols());
        sgemm(x, weight, result);
        float *pbias = bias.Data();
        #pragma omp parallel for
        for (int i = 0; i < inputTokenSize; ++i) {
            float *presult = result.Row(i);
            float *pinput = input.Row(i);
            #pragma omp simd
            for (int j = 0; j < result.Cols(); ++j) {
                presult[j] += pinput[j] + pbias[j];
            }
        }
    }

    // Per-row layer normalization: x = (x - mean) / sqrt(var + eps) * gamma + beta
    void batchnorm(hpj::Matrix<float> &x, hpj::Vector<float> &gamma, hpj::Vector<float> &beta) {
        assert(x.Cols() == hiddenSize);
        float *pgamma = gamma.Data();
        float *pbeta = beta.Data();
        #pragma omp parallel for
        for (int i = 0; i < inputTokenSize; ++i) {
            float sum = 0;
            float *px = x.Row(i);
            #pragma omp simd
            for (int j = 0; j < x.Cols(); ++j) {
                sum += px[j];
            }
            float mean = sum / hiddenSize;
            sum = 0;
            #pragma omp simd
            for (int j = 0; j < x.Cols(); ++j) {
                float delta = (px[j] - mean);
                sum += delta * delta;
            }
            // small epsilon keeps rvariance finite for constant rows
            float tmp = sum / hiddenSize + 9.999999960041972e-13;
            float rvariance = 1.0f / sqrt(tmp);
            #pragma omp simd
            for (int j = 0; j < x.Cols(); ++j) {
                px[j] = (px[j] - mean) * rvariance * pgamma[j] + pbeta[j];
            }
        }
    }

    // Feed-forward expansion followed by exact GELU: y = v * 0.5 * (erf(v/sqrt(2)) + 1)
    void intermediate(hpj::Matrix<float> &input, hpj::Matrix<float> &output) {
        sgemm(input, intermediateWeight, output);
        float *pbias = intermediateBias.Data();
        const float factor = sqrt(0.5f);
        const float scale = 0.5f / factor;
#ifdef __INTEL_COMPILER
        #pragma omp parallel for
        for (int i = 0; i < inputTokenSize; ++i) {
            float *pout = output.Row(i);
            #pragma omp simd
            for (int j = 0; j < output.Cols(); ++j) {
                float with_bias = pout[j] + pbias[j];
                pout[j] = with_bias * 0.5f * (erf(with_bias * factor) + 1);
            }
        }
#else
        // Non-ICC: erf is vectorized with MKL's vsErf; the input is pre-scaled
        // by factor, so 'scale' compensates (with_bias * 0.5 == pout * scale).
        #pragma omp parallel for
        for (int i = 0; i < inputTokenSize; ++i) {
            int tid = omp_get_thread_num();
            float *pout = output.Row(i);
            #pragma omp simd
            for (int j = 0; j < output.Cols(); ++j) {
                pout[j] = (pout[j] + pbias[j]) * factor;
            }
            vsErf(output.Cols(), pout, erf_buffer[tid]);
            #pragma omp simd
            for (int j = 0; j < output.Cols(); ++j) {
                pout[j] = pout[j] * scale * (erf_buffer[tid][j] + 1);
            }
        }
#endif
    }

    // ONLY for dimension 768
    // The first BatchMatMul inside self attention: qk_result[h] = Q_h * K_h^T
    void batchMatMul(hpj::Matrix<float> &A, hpj::Matrix<float> &B, float *c_array[12]){
        #define GRP_COUNT 1
        MKL_INT    m[GRP_COUNT] = {inputTokenSize};
        MKL_INT    k[GRP_COUNT] = {64};
        MKL_INT    n[GRP_COUNT] = {inputTokenSize};
        MKL_INT    lda[GRP_COUNT] = {A.Stride()};
        MKL_INT    ldb[GRP_COUNT] = {B.Stride()};
        MKL_INT    ldc[GRP_COUNT] = {inputTokenSize};
        CBLAS_TRANSPOSE    transA[GRP_COUNT] = { CblasNoTrans };
        CBLAS_TRANSPOSE    transB[GRP_COUNT] = { CblasTrans };
        float    alpha[GRP_COUNT] = {1.0};
        float    beta[GRP_COUNT] = {0.0};
        const MKL_INT    size_per_grp[GRP_COUNT] = {12};
        // Total number of multiplications: 12 (one per head, 64 columns each)
        const float *a_array[12], *b_array[12];
        for (int i = 0; i < 12; ++i) {
            a_array[i] = A.Data() + i * 64;
            b_array[i] = B.Data() + i * 64;
        }
        // Call cblas_sgemm_batch
        cblas_sgemm_batch (
                CblasRowMajor,
                transA,
                transB,
                m,
                n,
                k,
                alpha,
                a_array,
                lda,
                b_array,
                ldb,
                beta,
                c_array,
                ldc,
                GRP_COUNT,
                size_per_grp);
    }

    // ONLY for dimension 768
    // The second BatchMatMul inside self attention: C_h = softmax(QK^T)_h * V_h
    void batchMatMul(float *a_array[12], hpj::Matrix<float> &B, hpj::Matrix<float> &C) {
        #define GRP_COUNT 1
        MKL_INT    m[GRP_COUNT] = {inputTokenSize};
        MKL_INT    k[GRP_COUNT] = {inputTokenSize};
        MKL_INT    n[GRP_COUNT] = {64};
        MKL_INT    lda[GRP_COUNT] = {inputTokenSize};
        MKL_INT    ldb[GRP_COUNT] = {B.Stride()};
        MKL_INT    ldc[GRP_COUNT] = {C.Stride()};
        CBLAS_TRANSPOSE    transA[GRP_COUNT] = { CblasNoTrans };
        CBLAS_TRANSPOSE    transB[GRP_COUNT] = { CblasNoTrans };
        float    alpha[GRP_COUNT] = {1.0};
        float    beta[GRP_COUNT] = {0.0};
        const MKL_INT    size_per_grp[GRP_COUNT] = {12};
        // Total number of multiplications: 12 (one per head)
        float *b_array[12], *c_array[12];
        for (int i = 0; i < 12; ++i) {
            b_array[i] = B.Data() + i * 64;
            c_array[i] = C.Data() + i * 64;
        }
        // Call cblas_sgemm_batch
        cblas_sgemm_batch (
                CblasRowMajor,
                transA,
                transB,
                m,
                n,
                k,
                alpha,
                (const float **)a_array,
                lda,
                (const float **)b_array,
                ldb,
                beta,
                c_array,
                ldc,
                GRP_COUNT,
                size_per_grp);
    }

    // Add bias to matrix (row-broadcast)
    void biasAdd(hpj::Matrix<float> &m, hpj::Vector<float> &bias) {
        float *pbias = bias.Data();
        #pragma omp parallel for
        for (int i = 0; i < inputTokenSize; ++i) {
            float *p = m.Row(i);
            #pragma omp simd
            for (int j = 0; j < m.Cols(); ++j) {
                p[j] += pbias[j];
            }
        }
    }

    // input and output are both in qk_result
    // Applies the 1/sqrt(64) = 0.125 attention scaling, masks out padded
    // positions with a -10000 additive penalty, and normalizes each row.
    void computeSoftmax(int actualTokens) {
        for (int i = 0; i < actualTokens; ++i) { magic_value[i] = 0; }
        for (int i = actualTokens; i < inputTokenSize; ++i) { magic_value[i] = -10000; }
        #pragma omp parallel for
        for (int i = 0; i < 12; ++i) {
            float *pbuffer = exp_buffer[i];
            for (int row = 0; row < inputTokenSize; ++row) {
                float sum = 0;
                // max_val is used to avoid exp(x) = inf
                // FIX: seed with lowest() (most negative float), not min()
                // (smallest positive float) — min() gave a wrong maximum for
                // rows whose scores are all negative.
                float max_val = std::numeric_limits<float>::lowest();
                #pragma omp simd
                for (int j = 0; j < actualTokens; ++j) {
                    if (qk_result[i][row*inputTokenSize+j] > max_val) {
                        max_val = qk_result[i][row*inputTokenSize+j];
                    }
                }
                max_val *= 0.125f;
#ifdef __INTEL_COMPILER
                #pragma omp simd
                for (int j = 0; j < inputTokenSize; ++j) {
                    pbuffer[j] = exp(qk_result[i][row*inputTokenSize+j] * 0.125f + magic_value[j] - max_val);
                    sum += pbuffer[j];
                }
#else
                #pragma omp simd
                for (int j = 0; j < inputTokenSize; ++j) {
                    pbuffer[j] = qk_result[i][row*inputTokenSize+j] * 0.125f + magic_value[j] - max_val;
                }
                vsExp(inputTokenSize, pbuffer, pbuffer);
                for (int j = 0; j < inputTokenSize; ++j) {
                    sum += pbuffer[j];
                }
#endif
                float r_sum = 1.0f / sum;
                #pragma omp simd
                for (int j = 0; j < inputTokenSize; ++j) {
                    qk_result[i][row*inputTokenSize+j] = pbuffer[j] * r_sum;
                }
            }
        }
    }

private:
    // For debug usage
    int layerIdx;
    int maxTokenSize;
    // For some model, the len(input_ids) may be less than maxTokenSize
    int inputTokenSize;
    int hiddenSize;
    int intermediateSize;
    // Store the result of input*qkvWeight
    hpj::Matrix<float> qkvMatMul;
    // Buffer like the dimesion of 128x768
    hpj::Matrix<float> resultBuffer1, resultBuffer2;
    // Buffer to store the result of intermediate
    hpj::Matrix<float> intermediateBuffer;
    // Store the BatchMatMul result of query and key
    float *qk_result[12];
    // Store the result of exp for each line
    float *exp_buffer[12];
    // Magic value: 0 or -10000 (additive padding mask per position)
    float *magic_value;
    int num_threads;
#ifndef __INTEL_COMPILER
    float **erf_buffer;
#endif
    // Merged query, key, value weighs
    hpj::Matrix<float> qkvWeight;
    // Merged query, key, value bias
    hpj::Vector<float> qkvBias;
    hpj::Matrix<float> attentionOutputWeight;
    hpj::Vector<float> attentionOutputBias;
    // batchnorm param
    hpj::Vector<float> gamma1, beta1;
    hpj::Vector<float> gamma2, beta2;
    hpj::Matrix<float> intermediateWeight;
    hpj::Vector<float> intermediateBias;
    hpj::Matrix<float> outputWeight;
    hpj::Vector<float> outputBias;
};
#endif
|
openmp_matrix.c | /**
* Implement some parallel algorithms operating on matrixes with OpenMP
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE /* for snprintf */
#endif
/* Define inline for OpenMP compatibility with "clang -ansi" */
#ifdef __clang__
# define inline __inline__
#endif
#include <errno.h>
#include <omp.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/**
 * Output a string to standard output without any buffering
 */
static void print_nobuf(const char *string)
{
    size_t len = strlen(string);

    while (len > 0) {
        ssize_t count = write(STDOUT_FILENO, string, len);

        /* Retry when a signal interrupted the write.
         * FIX: errno carries the positive EINTR value; the previous
         * comparison against -EINTR could never match, so interrupted
         * writes silently aborted the loop instead of retrying.
         */
        if (count == -1 && errno == EINTR) {
            continue;
        }
        if (count <= 0) {
            break;
        }
        string += count;
        len -= (size_t)count;
    }
}
/**
 * Allocate memory; abort the process if the allocation fails
 */
static void *malloc_nofail(size_t size)
{
    void *ptr = malloc(size);

    if (ptr == NULL) {
        /* No sensible recovery in this demo: report and bail out */
        fprintf(stderr, "malloc: failed to allocate %lu bytes\n",
                (unsigned long)size);
        exit(1);
    }
    return ptr;
}
/**
 * Set every cell of a size-by-size matrix to the same value
 */
static void fill_square_matrix(double *matrix, size_t size, double value)
{
    size_t row = 0, col;

    /* Rows are independent; the default static schedule is fine */
    #pragma omp parallel for private(row, col)
    for (row = 0; row < size; row++) {
        double *line = matrix + row * size;

        for (col = 0; col < size; col++) {
            line[col] = value;
        }
    }
}
/**
 * Fill a list of dim-dimensional vectors with deterministic values:
 * vector i gets components i + 0.01, i + 0.02, ...
 */
static void init_vector_list(double *vectors, size_t size, size_t dim)
{
    size_t vec = 0, comp;

    #pragma omp parallel for private(vec, comp)
    for (vec = 0; vec < size; vec++) {
        for (comp = 0; comp < dim; comp++) {
            vectors[vec * dim + comp] = ((double)vec) + ((double)comp + 1) / 100.;
        }
    }
}
/**
 * Return the sum of all cells of a size-by-size matrix
 */
static double sum_square_matrix(const double *matrix, size_t size)
{
    const size_t count = size * size;
    size_t idx = 0;
    double total = 0;

    /* Treat the matrix as a flat array; OpenMP combines partial sums */
    #pragma omp parallel for private(idx) reduction(+:total)
    for (idx = 0; idx < count; idx++) {
        total += matrix[idx];
    }
    return total;
}
/**
 * Compute the squared euclidean distance of vectors in a square matrix
 * Here are several ways of implementing this:
 * 1. Compute separately each cell in matrix
 * 2. Compute by triangles
 */
/* Variant 1: one flat parallel loop, one matrix cell per iteration */
static void sq_euclidean_distance1(
    double *matrix, const double *vectors, size_t size, size_t dim)
{
    size_t cell = 0;

    #pragma omp parallel for private(cell)
    for (cell = 0; cell < size * size; cell++) {
        const double *u = vectors + (cell / size) * dim;
        const double *v = vectors + (cell % size) * dim;
        size_t comp;
        double acc = 0;

        for (comp = 0; comp < dim; comp++) {
            double delta = u[comp] - v[comp];

            acc += delta * delta;
        }
        matrix[cell] = acc;
    }
}
/* Variant 2: fill by rows, mirroring each distance across the diagonal so
 * every pair is computed only once (the diagonal itself is zero).
 */
static void sq_euclidean_distance2(
    double *matrix, const double *vectors, size_t size, size_t dim)
{
    size_t row = 0;

    #pragma omp parallel for private(row)
    for (row = 0; row < size; row++) {
        const double *u = vectors + row * dim;
        size_t col;

        matrix[row * size + row] = 0;
        for (col = row + 1; col < size; col++) {
            const double *v = vectors + col * dim;
            size_t comp;
            double acc = 0;

            for (comp = 0; comp < dim; comp++) {
                double delta = u[comp] - v[comp];

                acc += delta * delta;
            }
            matrix[row * size + col] = acc;
            matrix[col * size + row] = acc;
        }
    }
}
int main(void)
{
    const size_t size = 5000, dim = 2;
    double *matrix;
    double *vectors;

    /* Use omp_get_max_threads() instead of omp_get_thread_num() outside an
     * OpenMP loop.
     */
    printf("OpenMP max threads: %d\n", omp_get_max_threads());
    fflush(stdout);

    /* Test that everything is fine */
    print_nobuf("OpenMP threads:");
    #pragma omp parallel
    {
        int this_thread = omp_get_thread_num();
        int num_threads = omp_get_num_threads();
        char buffer[sizeof(" [/]") + 2 * 11];
        snprintf(buffer, sizeof(buffer), " [%d/%d]", this_thread, num_threads);
        print_nobuf(buffer);
    }
    print_nobuf("\n");

    /* Allocate a big matrix and 2 lists of vectors */
    matrix = malloc_nofail(size * size * sizeof(double));
    vectors = malloc_nofail(size * dim * sizeof(double));

    /* Initialization.
     * FIX: pass dim instead of the hard-coded literal 2, so the vector list
     * stays consistent with the distance computations if dim ever changes.
     */
    fill_square_matrix(matrix, size, 0);
    init_vector_list(vectors, size, dim);

    /* Computations */
    sq_euclidean_distance1(matrix, vectors, size, dim);
    printf("1: sum(eucl_dist(vects)) = %f\n", sum_square_matrix(matrix, size));
    sq_euclidean_distance2(matrix, vectors, size, dim);
    printf("2: sum(eucl_dist(vects)) = %f\n", sum_square_matrix(matrix, size));

    /* Free the mallocs */
    free(matrix);
    free(vectors);
    return 0;
}
|
zsyr2k.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_syr2k
*
* Performs one of the symmetric rank 2k operations
*
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C, \f]
* or
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C, \f]
*
* where alpha and beta are scalars,
* C is an n-by-n symmetric matrix, and A and B are n-by-k matrices
* in the first case and k-by-n matrices in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans:
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f]
* - PlasmaTrans:
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= zero.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A and B matrices;
* if trans = PlasmaTrans, number of rows of the A and B matrices.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* An lda-by-ka matrix.
* If trans = PlasmaNoTrans, ka = k;
* if trans = PlasmaTrans, ka = n.
*
* @param[in] lda
* The leading dimension of the array A.
* If trans = PlasmaNoTrans, lda >= max(1, n);
* if trans = PlasmaTrans, lda >= max(1, k).
*
* @param[in] pB
* An ldb-by-kb matrix.
* If trans = PlasmaNoTrans, kb = k;
* if trans = PlasmaTrans, kb = n.
*
* @param[in] ldb
* The leading dimension of the array B.
* If trans = PlasmaNoTrans, ldb >= max(1, n);
* if trans = PlasmaTrans, ldb >= max(1, k).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* An ldc-by-n matrix.
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1, n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_zsyr2k
* @sa plasma_csyr2k
* @sa plasma_dsyr2k
* @sa plasma_ssyr2k
*
******************************************************************************/
// Synchronous driver: validates arguments, converts the LAPACK-layout inputs
// to tile layout, runs the tile-async syr2k, and converts C back.
// Returns PlasmaSuccess or a negative argument index / error code.
int plasma_zsyr2k(plasma_enum_t uplo, plasma_enum_t trans,
                  int n, int k,
                  plasma_complex64_t alpha, plasma_complex64_t *pA, int lda,
                                            plasma_complex64_t *pB, int ldb,
                  plasma_complex64_t beta,  plasma_complex64_t *pC, int ldc)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans)) {
        plasma_error("illegal value of trans");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return -4;
    }

    // A and B are n-by-k when trans == PlasmaNoTrans, k-by-n otherwise.
    int am, an;
    int bm, bn;
    if (trans == PlasmaNoTrans) {
        am = n;
        an = k;
        bm = n;
        bn = k;
    }
    else {
        am = k;
        an = n;
        bm = k;
        bn = n;
    }
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -9;
    }
    if (ldc < imax(1, n)) {
        plasma_error("illegal value of ldc");
        return -12;
    }

    // quick return
    // FIX: k is an int; compare against the integer 0, not the double 0.0
    // (matches the equivalent check in plasma_omp_zsyr2k).
    if (n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_syr2k(plasma, PlasmaComplexDouble, n, k);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    plasma_desc_t C;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &C);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);
        plasma_omp_zge2desc(pC, ldc, C, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zsyr2k(uplo, trans,
                          alpha, A,
                                 B,
                          beta,  C,
                          &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(C, pC, ldc, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&C);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_syr2k
*
* Performs rank 2k update.
* Non-blocking tile version of plasma_zsyr2k().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans:
* \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f]
* - PlasmaTrans:
* \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f]
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
 * @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zsyr2k
* @sa plasma_omp_zsyr2k
* @sa plasma_omp_csyr2k
*
******************************************************************************/
// Tile-async entry point: validates the context, enums and descriptors, then
// hands off to the parallel implementation.  Errors are reported through
// sequence->status / request->status rather than a return value.
void plasma_omp_zsyr2k(plasma_enum_t uplo, plasma_enum_t trans,
                       plasma_complex64_t alpha, plasma_desc_t A,
                                                 plasma_desc_t B,
                       plasma_complex64_t beta,  plasma_desc_t C,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((trans != PlasmaNoTrans) && (trans != PlasmaTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (plasma_desc_check(C) != PlasmaSuccess) {
        plasma_error("invalid C");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence is NULL, plasma_request_fail is still
    // called with that NULL sequence — presumably it tolerates NULL; verify
    // against its implementation.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return: nothing to do if C is empty or the update is a no-op
    int k = trans == PlasmaNoTrans ? A.n : A.m;
    if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;

    // Call the parallel function.
    plasma_pzsyr2k(uplo, trans,
                   alpha, A,
                          B,
                   beta,  C,
                   sequence, request);
}
|
GB_binop__bxnor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bxnor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int64)
// C=scalar+B GB (_bind1st__bxnor_int64)
// C=scalar+B' GB (_bind1st_tran__bxnor_int64)
// C=A+scalar GB (_bind2nd__bxnor_int64)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxnor_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bxnor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxnor_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxnor_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) {
for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(8*t3+Nx-5,64));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
c55c7aec73df0f31d67fbe39510946453b899e1d.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
double section1;
double section2;
} ;
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
#pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
{
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp target teams distribute parallel for collapse(3)
for (int x = x_m; x <= x_M; x += 1)
{
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];
u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
#pragma omp target teams distribute parallel for collapse(1)
for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
{
int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
}
}
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
/* Begin section2 */
#pragma omp target teams distribute parallel for collapse(1)
for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
{
int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
float sum = 0.0F;
if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
}
if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
{
sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
}
if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
{
sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
}
if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
}
if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
}
if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
}
if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
}
if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
}
rec[time][p_rec] = sum;
}
/* End section2 */
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
}
#pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
return 0;
}
/* Backdoor edit at Wed Mar 4 19:26:11 2020*/
/* Backdoor edit at Wed Mar 4 19:30:55 2020*/
/* Backdoor edit at Wed Mar 4 19:32:37 2020*/
|
denseAsyncBlocksJacobi.h | //
// Created by mbarb on 14/02/2018.
//
#ifndef PARALLELITERATIVE_DENSEASYNCBLOCKJACOBI_H
#define PARALLELITERATIVE_DENSEASYNCBLOCKJACOBI_H
#include <Eigen>
#include <iostream>
#include <omp.h>
#include "utils.h"
#include "denseParallelJacobi.h"
namespace Iterative {
// Asynchronous block-Jacobi solver for dense systems: the matrix diagonal is
// partitioned into square blocks, each block's inverse is precomputed and
// cached, and blocks are relaxed in parallel. A block is dropped from the
// work list once its per-component update falls below the tolerance.
template <typename Scalar, long long SIZE>
class denseAsyncBlocksJacobi : public denseParallelJacobi<Scalar, SIZE> {
public:
/**
*
* @param matrix linear system matrix
* @param vector known term vector
* @param iterations max number of iterations
* @param tolerance min error tolerated
* @param workers number of threads
* @param blockSize size of the block (0 = derive from workers)
*/
explicit denseAsyncBlocksJacobi(
const Eigen::Matrix<Scalar, SIZE, SIZE>& matrix,
const Eigen::ColumnVector<Scalar, SIZE>& vector,
const ulonglong iterations,
const Scalar tolerance,
const ulong workers=0L,
const ulonglong blockSize = 0L):
denseParallelJacobi<Scalar,SIZE>::denseParallelJacobi(matrix, vector, iterations, tolerance, workers) {
this->blockSize = blockSize;
// NOTE(review): if both blockSize and workers are 0 this divides by zero;
// presumably the base class normalizes workers to >= 1 — confirm.
if (blockSize == 0)
this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong) 1L);
splitter();
}
/// Iterate until every block has converged or the iteration budget is
/// exhausted.
/// @return the approximate solution vector
const Eigen::ColumnVector<Scalar, SIZE> solve() {
Eigen::ColumnVector<Scalar, SIZE> oldSolution(this->solution);
std::vector<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>> inverses(blocks.size());
// compute the inverses of the blocks and memorize it
// NOTE(review): Eigen's block() signature is (startRow, startCol, rows,
// cols); the call below passes (startCol, startRow, cols, rows). This is
// only correct because the blocks are square and sit on the diagonal
// (startRow == startCol, rows == cols) — confirm.
#pragma omp parallel for
for (int i = 0; i < blocks.size(); ++i) {
inverses[i] = this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols,
blocks[i].rows).inverse();
}
// indices of blocks that converged during the current sweep
std::vector<int> index;
auto stop = false;
for (this->iteration=0L; this->iteration < this->iterations && !stop; ++this->iteration) {
// private(oldSolution): each thread works on its own copy, refreshed
// from the shared solution at the top of every iteration.
#pragma omp parallel
#pragma omp for private(oldSolution) schedule(dynamic) nowait
for (int i = 0; i < inverses.size(); ++i) {
oldSolution = this->solution;
// set zero the components of the solution b that corresponds to the inverse
Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock = oldSolution.segment(
blocks[i].startCol,
blocks[i].cols);
auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
zeroBlock.setZero();
// the segment of the solution b that this inverse approximates
auto block = this->solution.segment(blocks[i].startCol, blocks[i].cols);
// approximate the solution using the inverse and the solution at the previous iteration
block = inverses[i] *
(this->b - (this->A * oldSolution)).segment(blocks[i].startCol, blocks[i].cols);
zeroBlock = block;
// record the block as converged when the mean absolute update is
// within tolerance; critical guards the shared vector
if ((oldBlock - block).template lpNorm<1>() / block.size() <= this->tolerance) {
#pragma omp critical
index.emplace_back(i);
}
}
// NOTE(review): the parallel region above ends with its loop, so the
// barrier/single below are orphaned in sequential code and bind to a
// team of one thread (effectively no-ops) — confirm this is intended.
if (!index.empty()) {
#pragma omp barrier
#pragma omp single
{
// erase converged blocks from highest index down so the
// remaining offsets stay valid
std::sort(index.rbegin(), index.rend());
for (auto i : index) {
blocks.erase(blocks.begin() + i);
inverses.erase(inverses.begin() + i);
}
index.clear();
stop = inverses.empty();
};
}
}
// NOTE(review): orphaned barrier in sequential code — a no-op here.
#pragma omp barrier
std::cout << this->iteration << std::endl;
return this->solution;
}
protected:
// side length of each diagonal block (elements)
ulonglong blockSize;
// descriptors of the blocks still being iterated (see splitter())
std::vector<Index> blocks;
// Partition the diagonal into blockSize-sized chunks; the final chunk is
// clamped to the matrix bounds and may be smaller.
void splitter() {
for (ulonglong i = 0; i < this->A.cols(); i += blockSize) {
blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong) this->A.cols() - i),
i, std::min(blockSize, (ulonglong) this->A.rows() - i)));
}
}
private:
};
}
#endif //PARALLELITERATIVE_ASYNCJACOBI_H
|
kmp_num_teams.c | // RUN: %libomp-compile-and-run
// UNSUPPORTED: gcc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NT 8
#ifdef __cplusplus
extern "C" {
#endif
typedef int kmp_int32;
typedef struct ident {
kmp_int32 reserved_1;
kmp_int32 flags;
kmp_int32 reserved_2;
kmp_int32 reserved_3;
char const *psource;
} ident_t;
extern int __kmpc_global_thread_num(ident_t *);
extern void __kmpc_push_num_teams_51(ident_t *, kmp_int32, kmp_int32, kmp_int32,
kmp_int32);
#ifdef __cplusplus
}
#endif
/*
 * Push (num_teams_lb, num_teams_ub, thread_limit) into the LLVM OpenMP
 * runtime via the internal entry __kmpc_push_num_teams_51, run a
 * teams+parallel region, and verify that the granted team/thread counts fall
 * within the requested bounds. An argument of 0 means "not requested" and is
 * replaced by the observed value when computing the expected bounds.
 * Exits the process with status 1 on any violation.
 */
void check_num_teams(int num_teams_lb, int num_teams_ub, int thread_limit) {
  int nteams, nthreads; // written by team 0 / thread 0, read after the region
  int a = 0;            // counts every (team, thread) pair that executed
  int gtid = __kmpc_global_thread_num(NULL);
  __kmpc_push_num_teams_51(NULL, gtid, num_teams_lb, num_teams_ub,
                           thread_limit);
#pragma omp teams default(shared)
  {
    int priv_nteams;
    int team_num = omp_get_team_num();
    if (team_num == 0)
      nteams = omp_get_num_teams();
    priv_nteams = omp_get_num_teams();
#pragma omp parallel
    {
      int priv_nthreads;
      int thread_num = omp_get_thread_num();
      int teams_ub, teams_lb, thr_limit;
      if (team_num == 0 && thread_num == 0)
        nthreads = omp_get_num_threads();
      priv_nthreads = omp_get_num_threads();
      // Effective bounds: an unset (0) upper bound falls back to the granted
      // team count, an unset lower bound falls back to the upper bound.
      teams_ub = (num_teams_ub ? num_teams_ub : priv_nteams);
      teams_lb = (num_teams_lb ? num_teams_lb : teams_ub);
      thr_limit = (thread_limit ? thread_limit : priv_nthreads);
      if (priv_nteams < teams_lb || priv_nteams > teams_ub) {
        fprintf(stderr, "error: invalid number of teams=%d\n", priv_nteams);
        exit(1);
      }
      if (priv_nthreads > thr_limit) {
        fprintf(stderr, "error: invalid number of threads=%d\n", priv_nthreads);
        exit(1);
      }
#pragma omp atomic
      a++;
    }
  }
  // Holds only if every team ran with the same number of threads.
  if (a != nteams * nthreads) {
    fprintf(stderr, "error: a (%d) != nteams * nthreads (%d)\n", a,
            nteams * nthreads);
    exit(1);
  } else {
    printf("#teams %d, #threads %d: Hello!\n", nteams, nthreads);
  }
}
/*
 * Driver: exercise check_num_teams over a table of
 * {num_teams_lb, num_teams_ub, thread_limit} triples (0 = "unset").
 * The call order matches the original sequence exactly.
 */
int main(int argc, char *argv[]) {
  static const int cases[][3] = {
      {1, 8, 2},  {2, 2, 2}, {2, 2, 0}, {8, 16, 2}, {9, 16, 0},
      {9, 16, 2}, {2, 3, 0}, {0, 0, 2}, {0, 4, 0},  {0, 2, 2}};
  const int ncases = (int)(sizeof(cases) / sizeof(cases[0]));
  omp_set_num_threads(NT);
  for (int i = 0; i < ncases; ++i)
    check_num_teams(cases[i][0], cases[i][1], cases[i][2]);
  printf("Test Passed\n");
  return 0;
}
|
Blank.h | /// \ingroup base
/// \class ttk::Blank
/// \author Your Name Here <Your Email Address Here>
/// \date The Date Here.
///
/// \brief TTK %blank processing package.
///
/// %Blank is a TTK processing package that takes a scalar field on the input
/// and produces a scalar field on the output.
///
/// \sa ttk::Triangulation
/// \sa ttkBlank.cpp %for a usage example.
#pragma once
// base code includes
#include <Triangulation.h>
#include <Wrapper.h>
namespace ttk{
namespace blank{
// Scaffold TTK processing class: holds borrowed input/output buffers and a
// triangulation, and exposes a templated execute() (defined at the bottom of
// this header) that processes the scalar field.
class Blank : public Debug{
public:
Blank();
~Blank();
/// Execute the package.
/// \pre If this TTK package uses ttk::Triangulation for fast mesh
/// traversals, the function setupTriangulation() must be called on this
/// object prior to this function, in a clearly distinct pre-processing
/// step. An error will be returned otherwise.
/// \note In such a case, it is recommended to exclude
/// setupTriangulation() from any time performance measurement.
/// \param argument Dummy integer argument.
/// \return Returns 0 upon success, negative values otherwise.
template <class dataType>
int execute(const int &argument) const;
/// Pass a pointer to an input array representing a scalarfield.
/// The expected format for the array is the following:
/// <vertex0-component0> <vertex0-component1> ... <vertex0-componentN>
/// <vertex1-component0> <vertex1-component1> ... <vertex1-componentN>
/// <vertexM-component0> <vertexM-component1> ... <vertexM-componentN>.
/// The array is expected to be correctly allocated.
/// \param data Pointer to the data array (borrowed; the caller retains
/// ownership and must keep it alive for the lifetime of this object).
/// \return Returns 0 upon success, negative values otherwise.
/// \sa setVertexNumber() and setDimensionNumber().
inline int setInputDataPointer(void *data){
inputData_ = data;
return 0;
}
/// Pass a pointer to an output array representing a scalar field.
/// The expected format for the array is the following:
/// <vertex0-component0> <vertex0-component1> ... <vertex0-componentN>
/// <vertex1-component0> <vertex1-component1> ... <vertex1-componentN>
/// <vertexM-component0> <vertexM-component1> ... <vertexM-componentN>.
/// The array is expected to be correctly allocated.
/// \param data Pointer to the data array (borrowed; the caller retains
/// ownership and must keep it alive for the lifetime of this object).
/// \return Returns 0 upon success, negative values otherwise.
/// \sa setVertexNumber() and setDimensionNumber().
inline int setOutputDataPointer(void *data){
outputData_ = data;
return 0;
}
// General documentation info:
//
/// Setup a (valid) triangulation object for this TTK base object.
///
/// \pre This function should be called prior to any usage of this TTK
/// object, in a clearly distinct pre-processing step that involves no
/// traversal or computation at all. An error will be returned otherwise.
///
/// \note It is recommended to exclude this pre-processing function from
/// any time performance measurement. Therefore, it is recommended to
/// call this function ONLY in the pre-processing steps of your program.
/// Note however, that your triangulation object must be valid when
/// calling this function (i.e. you should have filled it at this point,
/// see the setInput*() functions of ttk::Triangulation). See ttkBlank
/// for further examples.
///
/// \param triangulation Pointer to a valid triangulation.
/// \return Returns 0 upon success, negative values otherwise.
/// \sa ttk::Triangulation
//
//
// Developer info:
// ttk::Triangulation is a generic triangulation representation that
// enables fast mesh traversal, either on explicit triangulations (i.e.
// tet-meshes) or implicit triangulations (i.e. low-memory footprint
// implicit triangulations obtained from regular grids).
//
// Not all TTK packages need such mesh traversal features. If your
// TTK package needs any mesh traversal procedure, we recommend to use
// ttk::Triangulation as described here.
//
// Each call to a traversal procedure of ttk::Triangulation
// must satisfy some pre-condition (see ttk::Triangulation for more
// details). Such pre-condition functions are typically called from this
// function.
inline int setupTriangulation(Triangulation *triangulation){
triangulation_ = triangulation;
if(triangulation_){
// TODO-1
// Pre-condition functions.
// Call all the required pre-condition functions here!
// for example:
triangulation_->preprocessVertexNeighbors();
// end of TODO-1
}
return 0;
}
protected:
// Borrowed, non-owning pointers; lifetimes are managed by the caller.
void *inputData_, *outputData_;
Triangulation *triangulation_;
};
}
}
// if the package is a pure template class, uncomment the following line
// #include <Blank.cpp>
// template functions
// Scaffold implementation: validates object state, copies the input scalar
// field to the output, runs an (empty, to-be-filled) parallel pass over the
// vertices, and reports timing. The `argument` parameter is a placeholder
// and is currently unused.
template <class dataType> int ttk::blank::Blank::execute(
const int &argument) const{
Timer t;
// check the consistency of the variables -- to adapt
#ifndef TTK_ENABLE_KAMIKAZE
if(!triangulation_)
return -1;
if(!inputData_)
return -2;
if(!outputData_)
return -3;
#endif
dataType *outputData = (dataType *) outputData_;
dataType *inputData = (dataType *) inputData_;
SimplexId vertexNumber = triangulation_->getNumberOfVertices();
// init the output -- to adapt
for(SimplexId i = 0; i < vertexNumber; i++){
outputData[i] = inputData[i];
}
// the following open-mp processing is only relevant for embarrassingly
// parallel algorithms (such as smoothing) -- to adapt
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for num_threads(threadNumber_)
#endif
for(SimplexId i = 0; i < vertexNumber; i++){
// TODO-2
// processing here!
// end of TODO-2
}
{
// NOTE(review): std::stringstream is used without a direct
// #include <sstream>; it presumably arrives transitively via
// <Wrapper.h> — confirm.
std::stringstream msg;
msg << "[Blank] Data-set (" << vertexNumber
<< " points) processed in "
<< t.getElapsedTime() << " s. (" << threadNumber_
<< " thread(s))."
<< std::endl;
dMsg(std::cout, msg.str(), timeMsg);
}
return 0;
}
|
Main.c | /*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
int main( int argc, char* argv[] )
{
	// =====================================================================
	// Initialization & Command Line Read-In
	// =====================================================================

	int version = 13;             // Version number
	double tick, tock;            // Kernel start / end wall-clock times
	double dval = 0;              // A dummy value, reported by print_results
	unsigned long long vhash = 0; // The verification hash
	int nprocs = 1;               // Number of MPI procs.
	                              // Fix: default to 1 so print_inputs /
	                              // print_results do not read an
	                              // uninitialized value in non-MPI builds.
	int mype = 0;                 // MPI rank

	// Fractions (by volume) of materials in the reactor core.
	// These are used as probabilities to approximate where xs lookups will occur.
	double dist[12] = {
		0.140, // fuel
		0.052, // cladding
		0.275, // cold, borated water
		0.134, // hot, borated water
		0.154, // RPV
		0.064, // Lower, radial reflector
		0.066, // Upper reflector / top plate
		0.055, // bottom plate
		0.008, // bottom nozzle
		0.015, // top nozzle
		0.025, // top of fuel assemblies
		0.013  // bottom of fuel assemblies
	};

	#ifdef MPI
	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif

	// rand() is only used in the serial initialization stages.
	// A custom RNG is used in parallel portions.
	#ifdef VERIFICATION
	srand(26);
	#else
	srand(time(NULL));
	#endif

	// Process CLI Fields -- store in "Inputs" structure
	// Duplicate as constant values to resolve data dependencies in _OPENACC loops.
	Inputs in = read_CLI(argc, argv);
	const int nthreads = in.nthreads;
	const long n_isotopes = in.n_isotopes;
	const long n_gridpoints = in.n_gridpoints;
	const int lookups = in.lookups;

	// Set number of OpenMP Threads
	omp_set_num_threads(in.nthreads);

	// Print-out of Input Summary
	if(mype == 0)
		print_inputs(in, nprocs, version);

	// =====================================================================
	// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
	// =====================================================================

	// === Allocate & fill energy grids
	#ifndef BINARY_READ
	if(mype == 0) printf("Generating Nuclide Energy Grids...\n");
	#endif

	// allocates nuclide_grids[0:n_isotopes][0:n_gridpoints]
	NuclideGridPoint ** nuclide_grids = gpmatrix(n_isotopes,n_gridpoints);

	// fill grids deterministically or randomly
	#ifdef VERIFICATION
	generate_grids_v(n_isotopes, n_gridpoints, nuclide_grids);
	#else
	generate_grids(n_isotopes, n_gridpoints, nuclide_grids);
	#endif

	// Sort grids by energy
	#ifndef BINARY_READ
	if(mype == 0) printf("Sorting Nuclide Energy Grids...\n");
	sort_nuclide_grids(n_isotopes, n_gridpoints, nuclide_grids);
	#endif

	// Prepare Unionized Energy Grid Framework
	int * restrict grid_ptrs = generate_ptr_grid(n_isotopes, n_gridpoints);
	#ifndef BINARY_READ
	GridPoint * restrict energy_grid = generate_energy_grid(n_isotopes,
		n_gridpoints, nuclide_grids, grid_ptrs);
	#else
	GridPoint * restrict energy_grid = (GridPoint *)malloc(n_isotopes *
		n_gridpoints * sizeof(GridPoint));
	// Fix: the loop index was undeclared (compile error when BINARY_READ is
	// defined); declare it wide enough for the long-valued trip count.
	for(long i = 0; i < n_isotopes*n_gridpoints; i++)
		energy_grid[i].xs_ptrs = i*n_isotopes;
	#endif

	// Double Indexing. Filling in energy_grid with pointers to the
	// nuclide_energy_grids.
	#ifndef BINARY_READ
	set_grid_ptrs(energy_grid, grid_ptrs, n_isotopes, n_gridpoints, nuclide_grids);
	#endif

	#ifdef BINARY_READ
	if(mype == 0) printf("Reading data from \"XS_data.dat\" file...\n");
	binary_read(n_isotopes, n_gridpoints, nuclide_grids, energy_grid, grid_ptrs);
	#endif

	// Get material data
	if(mype == 0) printf("Loading Mats...\n");
	int size_mats;
	if (n_isotopes == 68)
		size_mats = 197;
	else
		size_mats = 484;

	// The number of nuclides in each material
	int * restrict num_nucs = load_num_nucs(n_isotopes);
	// The indices of each material
	int * restrict mats_idx = load_mats_idx(num_nucs);
	// The nuclide identities of each material
	int * restrict mats = load_mats(num_nucs, mats_idx, size_mats, n_isotopes);

	// The concentrations of nuclides in each material
	#ifdef VERIFICATION
	double * restrict concs = load_concs_v(size_mats);
	#else
	double * restrict concs = load_concs(size_mats);
	#endif

	// Generate a stream of random numbers to copyin to device:
	// rands[2*i] is the particle energy roll, rands[2*i+1] the material roll.
	double * restrict rands = malloc(2*lookups*sizeof(double));
	for(int i=0; i<lookups; i++){
		#ifdef VERIFICATION
		rands[2*i] = rn_v();
		rands[2*i+1] = rn_v();
		#else
		rands[2*i] = (double) rand() / (double) RAND_MAX;
		rands[2*i+1] = (double) rand() / (double) RAND_MAX;
		#endif
	}

	// Allocate arrays for results to copyout from device
	#ifdef VERIFICATION
	int n_v_ints = lookups;
	int n_v_doubles = 6*lookups;
	#else
	int n_v_ints = 1;
	// Fix: the kernel still writes v_doubles[0..5] for lookup 0 in the
	// non-verification build, so six slots are needed (was 1: heap overflow).
	int n_v_doubles = 6;
	#endif
	int * restrict v_ints = malloc(n_v_ints*sizeof(int));
	double * restrict v_doubles = malloc(n_v_doubles*sizeof(double));
	#ifdef VERIFICATION
	for(int i = 0; i < lookups; i++){
		v_ints[i] = 0;
		// Fix: records have stride 6 (energy + five macro xs values per
		// lookup); the previous i + k indexing left most slots untouched.
		v_doubles[6*i + 0] = 0.0;
		v_doubles[6*i + 1] = 0.0;
		v_doubles[6*i + 2] = 0.0;
		v_doubles[6*i + 3] = 0.0;
		v_doubles[6*i + 4] = 0.0;
		v_doubles[6*i + 5] = 0.0;
	}
	#endif

	#ifdef BINARY_DUMP
	if(mype == 0) printf("Dumping data to binary file...\n");
	binary_dump(n_isotopes, n_gridpoints, nuclide_grids, energy_grid, grid_ptrs);
	if(mype == 0) printf("Binary file \"XS_data.dat\" written! Exiting...\n");
	return 0;
	#endif

	// =====================================================================
	// Cross Section (XS) Parallel Lookup Simulation Begins
	// =====================================================================

	if(mype == 0){
		printf("\n");
		border_print();
		center_print("SIMULATION", 79);
		border_print();
	}

	// In order to get the OpenMP4.X compiler working as of 1/27/2016, needed
	// to pass in the flattened array instead of 2D structure
	NuclideGridPoint * _nuclide_grids = (NuclideGridPoint *)&nuclide_grids[0][0];

	tick = timer();

	#pragma omp target \
	map(to:n_isotopes, \
	n_gridpoints, \
	lookups, \
	energy_grid[0:n_isotopes*n_gridpoints], \
	_nuclide_grids[0:n_isotopes * n_gridpoints], \
	grid_ptrs[0:n_isotopes*n_isotopes*n_gridpoints], \
	mats[0:size_mats], \
	mats_idx[0:12], \
	concs[0:size_mats], \
	num_nucs[0:12], \
	dist[0:12], \
	rands[0:2*lookups]) \
	map(tofrom:v_ints[0:n_v_ints], \
	v_doubles[0:n_v_doubles])
	#pragma omp teams distribute parallel for
	for(int i = 0; i < lookups; i++){
		// Randomly pick an energy and material for the particle
		double p_energy = rands[2*i];
		double roll = rands[2*i+1];

		// Use distribution to pick a material
		// ( inlined from pick_mat(mat_roll))
		int mat;
		for(mat = 0; mat < 12; mat++)
		{
			double running = 0;
			for(int j = mat; j > 0; j-- )
				running += dist[j];
			if( roll < running )
				break;
		}
		// the search loop can exit with mat == 12; wrap back into range
		mat = mat % 12;

		// This returns the macro_xs_vector, but we're not going
		// to do anything with it in this program, so return value
		// is written over.
		// INLINE: calculate_macro_xs( p_energy, mat, n_isotopes,
		//         n_gridpoints, num_nucs, concs,
		//         energy_grid, grid_ptrs, nuclide_grids, mats, mats_idx,
		//         macro_xs_vector );
		double macro_xs_0 = 0;
		double macro_xs_1 = 0;
		double macro_xs_2 = 0;
		double macro_xs_3 = 0;
		double macro_xs_4 = 0;

		// binary search for energy on unionized energy grid (UEG)
		// INLINE :
		// long idx = grid_search(n_isotopes * n_gridpoints, p_energy, energy_grid);
		long idx = 0; //lowerLimit
		long upperLimit = (n_isotopes * n_gridpoints) - 1;
		long examinationPoint;
		long length = upperLimit - idx;
		while( length > 1 ){
			examinationPoint = idx + ( length / 2 );
			if( energy_grid[examinationPoint].energy > p_energy )
				upperLimit = examinationPoint;
			else
				idx = examinationPoint;
			length = upperLimit - idx;
		}

		// Once we find the pointer array on the UEG, we can pull the data
		// from the respective nuclide grids, as well as the nuclide
		// concentration data for the material
		// Each nuclide from the material needs to have its micro-XS array
		// looked up & interpolatied (via calculate_micro_xs). Then, the
		// micro XS is multiplied by the concentration of that nuclide
		// in the material, and added to the total macro XS array.
		for(int j = 0; j < num_nucs[mat]; j++){
			// the nuclide we are looking up
			int p_nuc = mats[mats_idx[mat] + j];
			// the concentration of the nuclide in the material
			double conc = concs[mats_idx[mat] + j];
			// Interpolation factor
			double f;
			// Bounding energy gridpoints
			NuclideGridPoint * low, * high;
			// pull ptr from energy grid and check to ensure that
			// we're not reading off the end of the nuclide's grid
			if( grid_ptrs[energy_grid[idx].xs_ptrs + p_nuc] == n_gridpoints - 1 )
				low = &_nuclide_grids[p_nuc*n_gridpoints + grid_ptrs[energy_grid[idx].xs_ptrs
					+ p_nuc] - 1];
			else
				low = &_nuclide_grids[p_nuc*n_gridpoints + grid_ptrs[energy_grid[idx].xs_ptrs
					+ p_nuc]];
			high = low + 1;
			// calculate the re-useable interpolation factor
			f = (high->energy - p_energy) / (high->energy - low->energy);
			// Total XS
			macro_xs_0 += conc *
				(high->total_xs - f * (high->total_xs - low->total_xs));
			// Elastic XS
			macro_xs_1 += conc *
				(high->elastic_xs - f * (high->elastic_xs - low->elastic_xs));
			// Absorbtion XS
			macro_xs_2 += conc *
				(high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs));
			// Fission XS
			macro_xs_3 += conc *
				(high->fission_xs - f * (high->fission_xs - low->fission_xs));
			// Nu Fission XS
			macro_xs_4 += conc *
				(high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs));
		} // END: for( int j = 0; j < num_nucs[mat]; j++ )

		// Accumulate results into a dummy variable for reduction
		//dval += (mat + p_energy + macro_xs_0 + macro_xs_1 + macro_xs_2 + macro_xs_3 + macro_xs_4);

		// Verification hash calculation
		// This method provides a consistent hash accross
		// architectures and compilers.
		#ifndef VERIFICATION
		if(i == 0){
		#endif
			v_ints[i] = mat;
			v_doubles[6*i] = p_energy;
			v_doubles[6*i+1] = macro_xs_0;
			v_doubles[6*i+2] = macro_xs_1;
			v_doubles[6*i+3] = macro_xs_2;
			v_doubles[6*i+4] = macro_xs_3;
			v_doubles[6*i+5] = macro_xs_4;
		#ifndef VERIFICATION
		}
		#endif
	} // END: for(int i = 0; i < _lookups; i++)

	tock = timer();

	#ifdef VERIFICATION
	for(int i = 0; i < lookups; i++){
		char line[256];
		sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf",
			v_doubles[6*i],
			v_ints[i],
			v_doubles[6*i+1],
			v_doubles[6*i+2],
			v_doubles[6*i+3],
			v_doubles[6*i+4],
			v_doubles[6*i+5]);
		vhash += hash((unsigned char*)line, 10000);
	}
	#endif

	// Print / Save Results and Exit
	print_results(in, mype, tock-tick, nprocs, dval, vhash);

	#ifdef MPI
	MPI_Finalize();
	#endif

	return 0;
}
|
vec2d_add.c | //////////////////////////////////////////////////////////////
// ____ //
// | __ ) ___ _ __ ___ _ _ _ __ ___ _ __ _ __ ___ //
// | _ \ / _ \ '_ \/ __| | | | '_ \ / _ \ '__| '_ \ / __| //
// | |_) | __/ | | \__ \ |_| | |_) | __/ | | |_) | (__ //
// |____/ \___|_| |_|___/\__,_| .__/ \___|_| | .__/ \___| //
// |_| |_| //
//////////////////////////////////////////////////////////////
// //
// BenLib, 2021 //
// Created: 17, March, 2021 //
// Modified: 17, March, 2021 //
// file: OpenCL_test.cpp //
// Crypto //
// Source: https://github.com/Kaixhin/cuda-workshop //
// https://forums.developer.nvidia.com/t/double-pointer-allocation/9390 //
// https://stackoverflow.com/a/31382775/10152334 //
// CPU: ALL //
// //
//////////////////////////////////////////////////////////////
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "kernel.h"
#define THREADS_PER_BLOCK 1024 // Max 1024
/* Serial element-wise addition of two N x N row-major matrices: c = a + b.
 * Walks column-by-column (outer j), mirroring the GPU kernel's traversal. */
void matrixAdd_CPU(int *a, int *b, int *c, int N)
{
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            const int k = i * N + j;
            c[k] = a[k] + b[k];
        }
    }
}
/* OpenMP element-wise addition of two N x N row-major matrices: c = a + b.
 * Every thread walks all columns; within each column the rows are divided
 * among the threads by the `omp for` (nowait: iterations are independent,
 * so no barrier is needed between columns).
 *
 * Fix: `index` was a single variable declared outside the parallel region
 * and therefore shared by all threads — a data race that could make threads
 * write through each other's indices. It is now computed locally per
 * iteration. */
void matrixAdd_MP(int *a, int *b, int *c, int N)
{
    #pragma omp parallel
    for (int col = 0; col < N; col++) {
        #pragma omp for nowait
        for (int row = 0; row < N; row++) {
            const int index = row * N + col;
            c[index] = a[index] + b[index];
        }
    }
}
int main()
{
// Matrix side length and launch geometry: each CUDA block is a
// sqrt(THREADS_PER_BLOCK) x sqrt(THREADS_PER_BLOCK) tile of threads.
int N = 8192; // Define size of 1 side of square matrix
int sqrtThreads = sqrt(THREADS_PER_BLOCK);
int nBlocks = N / sqrtThreads;
if (N % sqrtThreads != 0) { // Add an extra block if necessary
nBlocks++;
}
// NOTE(review): dim3 members are unsigned; brace-initializing them from int
// narrows — values are non-negative here, but confirm the compiler accepts
// this in the build's language mode.
dim3 grid = {nBlocks, nBlocks, 1};
dim3 block = {sqrtThreads, sqrtThreads, 1};
// Initialise host pointers (dynamically allocated memory) and device pointers
int *a_h;
int *b_h;
int *c_h; // GPU results
int *d_h; // CPU results
int *a_d;
int *b_d;
int *c_d;
int size; // Number of bytes required by arrays
// Create timer (CUDA events are used to time both the GPU and CPU paths)
cudaEvent_t start;
cudaEvent_t stop;
float elapsedTime;
// Print out information about blocks and threads
// NOTE(review): %i expects int but dim3 members are unsigned — benign on
// common ABIs; %u would be exact.
printf("Number of threads: %i (%ix%i)\n", block.x * block.y, block.x, block.y);
printf("Number of blocks: %i (%ix%i)\n", grid.x * grid.y, grid.x, grid.y);
// Dynamically allocate host memory
size = N * N * sizeof(int);
a_h = (int *)malloc(size);
b_h = (int *)malloc(size);
c_h = (int *)malloc(size);
d_h = (int *)malloc(size);
// Load host arrays with data (every element of row i gets the value i)
for (size_t i = 0; i < N; i++) {
for (size_t j = 0; j < N; j++) {
a_h[i * N + j] = i;
b_h[i * N + j] = i;
}
}
// Allocate device memory
cudaMalloc((void **)&a_d, size);
cudaMalloc((void **)&b_d, size);
cudaMalloc((void **)&c_d, size);
// Copy host memory to device memory
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
// NOTE(review): c_h is uninitialized at this point, so this upload copies
// garbage that the kernel immediately overwrites — it looks removable.
cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);
// Start timer for GPU
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch kernel
// matrixAddKernel<<<grid, block>>>(a_d, b_d, c_d, N);
matrixAdd(grid, block, a_d, b_d, c_d, N);
// Stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// Print execution time
printf("Time to calculate results on GPU: %f ms\n", elapsedTime);
// Copy results to device
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
// Start timer for CPU
cudaEventRecord(start, 0);
// Launch CPU code
matrixAdd_MP(a_h, b_h, d_h, N);
// Stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// Print execution time
// NOTE(review): the CPU time is printed multiplied by 2.0 — presumably a
// deliberate adjustment inherited from the workshop source; verify.
printf("Time to calculate results on CPU: %f ms\n", elapsedTime * 2.0);
// Compare results (report only the first mismatching element)
for (size_t i = 0; i < N * N; i++) {
if (c_h[i] != d_h[i]) {
printf("Error: CPU and GPU results do not match\n");
printf("c_h: %i, d_h: %i, i: %li\n", c_h[i], d_h[i], i);
break;
}
}
// Free memory
free(a_h);
free(b_h);
free(c_h);
free(d_h);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
omp_parallel_default.c | <ompts:test>
<ompts:testdescription>Test which checks the default option of the parallel construct.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp parallel default</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>
#include "omp_testsuite.h"
/*
 * OpenMP testsuite check for the parallel construct's default(shared)
 * clause: each thread accumulates a private partial sum over 1..LOOPCOUNT
 * and folds it into the shared total inside a critical section; the test
 * passes when the total equals the closed form LOOPCOUNT*(LOOPCOUNT+1)/2.
 */
int <ompts:testcode:functionname>omp_parallel_default</ompts:testcode:functionname> (FILE * logFile)
{
<ompts:orphan:vars>
int i;
int sum;
int mysum;
</ompts:orphan:vars>
int known_sum;
sum =0;
/* Expected value: Gauss sum of 1..LOOPCOUNT. */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 ;
<ompts:orphan>
/* The crosscheck variant also makes 'sum' private, which breaks the
   shared accumulation and must cause the test to fail. */
#pragma omp parallel <ompts:check>default(shared)</ompts:check> private(i) private(mysum<ompts:crosscheck>,sum</ompts:crosscheck>)
{
mysum = 0;
#pragma omp for
for (i = 1; i <= LOOPCOUNT; i++)
{
mysum = mysum + i;
}
/* Serialize updates of the shared total. */
#pragma omp critical
{
sum = sum + mysum;
} /* end of critical */
} /* end of parallel */
</ompts:orphan>
if (known_sum != sum) {
fprintf(logFile, "KNOWN_SUM = %d; SUM = %d\n", known_sum, sum);
}
return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
|
firstlastprivate.c | #include <stdio.h>
#include <omp.h>
#include <assert.h>
/*
 * Demonstrates firstprivate and lastprivate applied to the same variable:
 * every thread's private copy starts from the seed 77 (firstprivate) and
 * accumulates its share of the iterations; the copy belonging to the
 * thread that executes the sequentially-last iteration is written back to
 * the original variable (lastprivate) and then printed.
 */
int main()
{
    enum { STEP_COUNT = 100 };
    int total = 77;   /* seed copied into each thread's private copy */
    int step;

    omp_set_num_threads(4);

    #pragma omp parallel for firstprivate (total) lastprivate (total)
    for (step = 1; step <= STEP_COUNT; ++step) {
        total += step;
    }

    printf("sum=%d\n", total);
    return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
// NOTE(review): an invalid PointerLoc presumably means no unannotated
// pointer declarator has been recorded yet for this file -- confirm
// against the users of this struct.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Backing store: the per-file nullability records.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// One-entry cache holding the record for the most recently queried file.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Returns a mutable reference to the record for \p file, creating a
  /// default record on first access (same contract as DenseMap::operator[]).
  FileNullability &operator[](FileID file) {
    if (file != Cache.File) {
      // A different file is being queried: write the cached record back
      // (unless the cache was never filled) before loading the new one.
      if (!Cache.File.isInvalid())
        Map[Cache.File] = Cache.Nullability;
      Cache.File = file;
      Cache.Nullability = Map[file];
    }
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
// Each enter* hook records the type expected at the token that follows the
// construct being entered (condition, return statement, initializer, ...).
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Returns the expected type for the token at \p Tok, or a null QualType
/// when no expectation was recorded for that exact location.
QualType get(SourceLocation Tok) const {
// Only the token the expectation was recorded for may consume it.
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
// Fall back to the lazily-computed type (see enterFunctionArgument).
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// A visible previous declaration is always linked against.
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
// Old is hidden and New is not externally declarable: do not link them.
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do since we can't directly use the
/// llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// Generic value stack backing the MSVC-compatible #pragma push/pop state;
// instantiated below for vtordisp, pack, the segment-name pragmas, and the
// FP-feature pragma stack.
template<typename ValueType>
struct PragmaStack {
// One pushed entry: the (possibly empty) label it was pushed under, the
// saved value, and the locations of the controlling pragmas.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
// Applies a pragma action (see PragmaMsStackAction) observed at
// PragmaLocation to this stack.
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
// True when some pragma has changed the value away from the default.
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<unsigned> FpPragmaStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
/// Opaque save-state returned by DelayedDiagnostics::push/pushUndelayed:
/// it holds the pool that was active before the push so the matching pop
/// can restore it.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go. Null means diagnostics are not delayed.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
// Saved Sema state restored by pop(); SavedContext doubles as the
// "already popped" marker (null after pop()).
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
// Restores all saved Sema state; safe to call before destruction, and
// idempotent (subsequent calls, including the destructor's, are no-ops).
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
// Mark as popped so the destructor doesn't restore twice.
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
/// Returns true when the innermost expression-evaluation context is
/// constant-evaluated, or when the lightweight override flag is set.
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
// Enter a fresh function scope and a potentially-evaluated expression
// context for the synthesized body.
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
/// Records a code-synthesis context so diagnostics emitted while the
/// body is built can refer back to \p UseLoc. Must be called at most
/// once per scope (see the assert).
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
// Unwind in reverse order of construction.
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;
  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;
  /// Whether we are in a decltype expression.
  bool IsDecltype;
  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;
  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;
  /// Maybe-ODR-used expressions saved from the enclosing context.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;
  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;
  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;
  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
  /// Dereferences of 'noderef' pointers recorded in this context; warned on
  /// when the context is popped (see WarnOnPendingNoDerefs).
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr *, 2> VolatileAssignmentLHSs;
  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4>
      ImmediateInvocationCandidates;
  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        // Start IsDecltype from a defined 'false': it is set after the record
        // is pushed, and reading an uninitialized bool would be UB.
        IsDecltype(false), NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// True for any flavor of unevaluated-operand context.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  /// True when the expression is evaluated at compile time.
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  /// Outcome of overload resolution for a special member function.
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // The resolved method (if any) with the result kind packed into the
  // low two bits.
  llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair() {
    // A deleted member counts as "no member"; anything else is a success.
    Pair.setPointer(MD);
    Pair.setInt(MD->isDeleted() ? NoMemberOrDeleted : Success);
  }

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                         public SpecialMemberOverloadResult {
public:
  /// Create a cache entry keyed by \p ID for insertion into a FoldingSet.
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor, ///< Default constructor.
CXXCopyConstructor, ///< Copy constructor.
CXXMoveConstructor, ///< Move constructor.
CXXCopyAssignment, ///< Copy assignment operator.
CXXMoveAssignment, ///< Move assignment operator.
CXXDestructor, ///< Destructor.
CXXInvalid ///< Sentinel: not a special member.
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {}
~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
// Kept so the destructor can tell Sema which diagnostic to emit.
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
// Stream through the base class, but return the Sema wrapper so that
// further chained << calls keep producing SemaDiagnosticBuilder.
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
///
/// Wraps DiagnosticsEngine::Report in a SemaDiagnosticBuilder, whose
/// destructor dispatches back to Sema (see EmitCurrentDiagnostic).
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder BaseBuilder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(BaseBuilder, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
// Defined out of line; disposes of the popped FunctionScopeInfo.
// NOTE(review): body not visible in this header — presumably returns the
// scope to Sema for reuse; confirm in the implementation file.
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Retrieve the innermost function scope information, or null when no
/// function scope has been pushed.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
/// Emit the diagnostic for type \p T used at \p Loc; implemented by
/// subclasses such as BoundTypeDiagnoser below.
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
// getPrintable(): overload set that normalizes each supported argument kind
// into a value streamable into a diagnostic via operator<<; used by
// BoundTypeDiagnoser::emit below. Most overloads are identity functions.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A SourceLocation is widened to a SourceRange via implicit conversion.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that binds extra diagnostic arguments (by reference) at
/// construction time and streams them ahead of the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
// The bound arguments, held by const reference — callers must keep the
// originals alive for the diagnoser's lifetime.
std::tuple<const Ts &...> Args;
/// Stream every bound argument into \p DB, left to right.
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
// (The braced array initializer guarantees left-to-right evaluation of
// the pack expansion; Dummy itself is unused.)
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
/// Emit DiagID at \p Loc with the bound arguments followed by type \p T.
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  /// Emit the bound diagnostic, inserting the "is sizeless" selector value
  /// immediately before the type argument.
  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &Builder = S.Diag(Loc, this->DiagID);
    this->emit(Builder, std::index_sequence_for<Ts...>());
    Builder << T->isSizelessType() << T;
  }
};
/// Controls how sizeless built-in types are treated when checking type
/// completeness (see RequireCompleteType / isCompleteType).
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
/// Shared implementation behind the public RequireCompleteType overloads.
/// A null \p Diagnoser suppresses diagnostics (isCompleteType passes
/// nullptr and negates the result).
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// One entry of the module-parsing stack (see ModuleScopes below).
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
// NOTE(review): presumably the visible-module set in effect outside this
// scope, restored when the scope ends -- confirm against the definition.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
/// The set of modules currently visible (queried via isModuleVisible).
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
// Fast path: a declaration that is not hidden is always visible.
// Otherwise fall back to the slow module-visibility check.
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
/// Slow path of hasVisibleDeclaration. NOTE(review): \p Modules, if
/// non-null, presumably receives the modules that would make \p D
/// visible -- confirm against the definition.
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
/// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Determine whether \p T is complete without emitting any diagnostics
/// (a null diagnoser is passed to the shared implementation).
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
/// Ensure that \p T is a complete type, diagnosing via \p Diagnoser on
/// failure. NOTE(review): given isCompleteType negates the shared
/// implementation, a true result presumably indicates failure -- confirm.
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
// Convenience overloads using CompleteTypeKind::Default.
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
/// Variadic overload: binds \p Args into the diagnostic via
/// BoundTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
/// As RequireCompleteType but uses CompleteTypeKind::Normal (sizeless
/// types are treated as incomplete) and a SizelessTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
/// Ensure that the type of the expression \p E is complete.
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
/// Ensure that \p T is a literal type, diagnosing on failure.
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Records whether a function body the parser has already seen may be
/// skipped, together with the previous and new declarations involved.
/// Default-constructed state marks nothing to skip.
struct SkipBodyInfo {
  SkipBodyInfo() = default;
  bool ShouldSkip = false;
  bool CheckSameAsPrevious = false;
  NamedDecl *Previous = nullptr;
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
/// Determine whether the token kind \p Kind can begin a simple
/// type-specifier.
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
/// Look up \p II as a type name in scope \p S, refined by the many flag
/// parameters. NOTE(review): \p CorrectedII presumably receives a
/// typo-corrected identifier when non-null -- confirm in the definition.
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
/// Diagnose a use of an unknown type name \p II, updating
/// \p SuggestedType if a plausible correction is found.
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
/// The result of classifying a name: a discriminated union keyed on
/// NameClassificationKind, built via the static factory functions below.
class NameClassification {
NameClassificationKind Kind;
// Only the member selected by Kind is active; the getters below assert
// that the matching kind is set before reading.
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
/// Valid only for NC_ContextIndependentExpr.
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
/// Valid only for NC_Type.
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
/// Valid only for NC_NonType.
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
/// Valid only for the template-name kinds.
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
/// Map the classification to the corresponding TemplateNameKind.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
/// Map \p Name to its TemplateNameKindForDiagnostics category.
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. \p Dependent is set to true when the reference is into a
/// dependent context (left false when returning early).
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (E.isInvalid() || !getLangOpts().CPlusPlus)
    return false;

  const Expr *Ref = E.get();

  // Non-dependent references: a plain declaration or member reference
  // without explicit template arguments could plausibly name a template.
  Dependent = false;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(Ref))
    return !DRE->hasExplicitTemplateArgs();
  if (const auto *ME = dyn_cast<MemberExpr>(Ref))
    return !ME->hasExplicitTemplateArgs();

  // Dependent references.
  Dependent = true;
  if (const auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(Ref))
    return !DSDRE->hasExplicitTemplateArgs();
  if (const auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(Ref))
    return !DSME->hasExplicitTemplateArgs();

  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
/// Diagnose qualifiers (encoded in \p Quals) that are ignored; the
/// per-qualifier locations default to invalid and \p FallbackLoc is used
/// when a specific location is not available.
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// Controls whether CheckConstexprFunctionDefinition diagnoses or merely
/// reports validity.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
// Parser callbacks for a parameter's default argument: parsed,
// delay-parsed, or erroneous.
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true if \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
/// Determine whether redeclaring tag \p Previous with tag kind \p NewTag
/// (at \p NewTagLoc) is acceptable.
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag (struct/union/class/enum) name is being used.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
/// Parser callback for a tag name that is referenced, declared, defined, or
/// befriended; \p TUK distinguishes the use (see TagUseKind).
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
/// Handle a friend tag declaration that carries template parameter lists.
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
/// Form the type for a tag whose nested-name-specifier \p SS makes it
/// dependent.
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
/// Parser callback for a field declarator inside a record.
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
/// Build a FieldDecl for a member declarator (optionally a bit-field when
/// \p BitfieldWidth is non-null).
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
/// Build a declaration for a Microsoft property member (see
/// \p MSPropertyAttr).
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
/// Perform semantic checking on a field and create the FieldDecl; \p
/// PrevDecl is a previous declaration with the same name, if any.
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
/// Explain why special member \p CSM of \p Record is non-trivial.
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute participates in triviality queries.
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
/// Determine whether the given special member function \p MD is trivial;
/// \p TAH controls whether "trivial_abi" is taken into account.
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
///
/// A discriminated pair: a defaulted function is either a C++ special member
/// function or a defaulted comparison (or neither, when default-constructed).
class DefaultedFunctionKind {
// Exactly one of the two is "set": a special member leaves Comparison at
// None, a comparison leaves SpecialMember at CXXInvalid.
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
/// True when this represents either kind of defaulted function.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// Special members occupy [0, CXXInvalid); comparisons are indexed after
// CXXInvalid (the asserts above make the two ranges disjoint).
return SpecialMember + (unsigned)Comparison;
}
};
/// Determine which kind of defaulted function \p FD is, if any.
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Convenience wrapper: the special-member half of getDefaultedFunctionKind.
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
/// Convenience wrapper: the comparison half of getDefaultedFunctionKind.
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
// NOTE(review): appears to post-process the final bit-field in an ObjC ivar
// list -- confirm against the definition before relying on this.
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
/// Parser callback for an Objective-C instance variable declarator.
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
// Opaque handle identifying a skipped definition; produced by
// ActOnTagStartSkippedDefinition, consumed by ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
/// Check and create a single enumerator; \p LastEnumConst is the previous
/// enumerator in the list, or null for the first one.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
/// Check the validity of an enum's fixed underlying type.
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
/// Check an enum redeclaration against a previous declaration \p Prev.
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
/// Parser callback for a single enumerator.
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
/// Parser callback at the end of an enum body, with all enumerators parsed.
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
/// (This is the default merge kind used by mergeDeclAttributes below.)
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
/// See the comment above for how these weights combine into a final priority.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Each returns the newly-added attribute, or
/// nullptr if no new attribute was added. (The original comment said
/// "return true", but every declaration below returns an attribute pointer.)
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
/// Merge the attributes of a previous declaration \p Old into \p New.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
/// Redeclaration-merging entry points, one per declaration kind.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning, // plain assignment: 'a = b'
AA_Passing, // passing an argument to a function
AA_Returning, // returning a value from a function
AA_Converting, // an explicit or implicit conversion
AA_Initializing, // initializing a declaration
AA_Sending, // sending an Objective-C message argument
AA_Casting, // a cast expression
AA_Passing_CFAudited // passing to a CF-audited (ObjC ARC) function
};
/// C++ Overloading.
/// The outcome of checking whether a new declaration overloads existing ones.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
/// Determine whether \p New legitimately overloads \p OldDecls; on a match,
/// \p OldDecl receives the declaration it matched.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
/// Determine whether two function declarations are distinct overloads.
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// Which 'explicit' functions may participate in an implicit conversion.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
/// Attempt an implicit conversion from \p From to \p ToType, producing the
/// resulting implicit conversion sequence (which may be a failed sequence).
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Per-category conversion queries; each returns true when the described
// standard conversion applies between the given types.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
/// Compare the parameter lists of two function prototypes.
/// NOTE(review): \p ArgPos presumably reports the index of the first
/// mismatching parameter -- confirm against the definition.
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
/// Validate a pointer conversion, computing the cast kind and base path.
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
/// Initialize \p Entity from \p Value, as a move when permitted
/// (\p AllowNRVO, \p NRVOCandidate) and a copy otherwise.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
/// Perform copy-initialization of \p Entity from \p Init.
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
/// Convert the implicit object argument \p From for a call to \p Method.
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
/// Check that \p From is a converted constant expression of type \p T in
/// context \p CCE; on success \p Value receives the evaluated result.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// NOTE(review): from the names and the ICEConvertDiagnoser subclass below,
// these presumably suppress all diagnostics / only the conversion
// diagnostics respectively -- confirm at the use sites.
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Converter for contexts requiring an integral or enumeration type,
/// optionally accepting scoped enumerations.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
// Forwards the generic "no match" diagnostic to the more specific
// diagnoseNotInt hook below.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// How an Objective-C container is subscripted.
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
/// Convert \p From for use as member \p Member found via \p FoundDecl.
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
// Whether a call expression was formed through argument-dependent lookup.
using ADLCallKind = CallExpr::ADLCallKind;
// Overload candidate construction: each AddXCandidate routine below appends
// zero or more candidates for the given callee shape to \p CandidateSet.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
/// Check the non-dependent parameter conversions for a function template
/// candidate, filling in \p Conversions.
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
/// Add a conversion-function candidate for converting \p From to \p ToType.
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
/// Add a surrogate candidate: calling \p Object via conversion to a
/// function pointer/reference of type \p Proto.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// Strip pointer/reference/member-pointer sugar off a function type:
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve the overload named by \p AddressOfExpr against \p TargetType;
/// \p Found receives the chosen declaration.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite an overloaded-function reference \p E to refer to the resolved
/// function \p Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
/// Collect the overload candidates for a call through \p ULE.
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
/// Build the begin/end calls for a range-based for loop over \p Range.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
/// Build a call to an overloaded function named by \p ULE.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build a call through an overloaded 'operator->'.
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Pick the redeclaration-lookup kind appropriate for the current context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  auto *D = cast<Decl>(CurContext);
  if (!D->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
    return ForExternalRedeclaration;
  return ForVisibleRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-typo bookkeeping: the correction candidates plus the callbacks used
/// to diagnose or recover once a correction is selected.
struct TypoExprState {
  /// Owns the candidate-correction consumer for this typo.
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  /// Called with a TypoCorrection to produce diagnostics
  /// (see the TypoDiagnosticGenerator typedef above).
  TypoDiagnosticGenerator DiagHandler;
  /// Rebuilds an expression from a chosen correction, yielding an
  /// ExprResult (see the TypoRecoveryCallback typedef above).
  TypoRecoveryCallback RecoveryHandler;
  // Move-only (unique_ptr member): only move construction/assignment
  // are declared.
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
  Emitted,           ///< The function will be emitted.
  CUDADiscarded, // Discarded due to CUDA/HIP hostness
  OMPDiscarded, // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,           ///< Emission status cannot (yet) be determined.
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: no variable to avoid (InitDecl is nullptr).
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// Process any TypoExprs in \p ER (unless it is already invalid).
///
/// \param ER The expression result to check; returned unchanged if invalid.
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is
///        its initializer.
/// \param Filter Applied to each rebuilt Expr to accept or reject a
///        particular combination of typo corrections.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Fix: InitDecl was previously dropped here (the forwarding call passed
  // only ER.get() and Filter), so the variable-to-avoid was never honored
  // on this path. Forward it to the Expr* overload explicitly.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Convenience overload: no variable to avoid (InitDecl is nullptr).
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method types must match
/// (see MatchTwoMethodDeclarations).
enum MethodMatchStrategy {
  MMS_loose, ///< Looser type matching.
  MMS_strict ///< Exact matching; the default used by most callers.
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
/// Optionally record where this identifier failed to be corrected, then
/// return an empty (no-op) correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Thin wrapper: forward to the common helper with instance = true.
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  // Thin wrapper: forward to the common helper with instance = false.
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  // Thin wrapper: forward to the common lookup with instance = true.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  // Thin wrapper: forward to the common lookup with instance = false.
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Value wrapper for a fully-analyzed expression, produced by
/// Sema::MakeFullExpr and friends. Only Sema can wrap a raw Expr*
/// (the converting constructor below is private).
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { } // Sema parameter is unused.
  /// Hand back the wrapped expression as an ExprResult.
  ExprResult release() {
    return E;
  }
  Expr *get() const { return E; }
  Expr *operator->() {
    return E;
  }
private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E; // The wrapped expression; may be null.
};
FullExprArg MakeFullExpr(Expr *Arg) {
  // Delegate, using the expression's own location (invalid when Arg is null).
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  // Finish the full-expression (value not discarded) and wrap the result.
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Like MakeFullExpr, but the expression's value is discarded.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  // Use the expression's own location when available; an invalid location
  // otherwise.
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  // On entry: tell Sema a compound statement is starting.
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  // On exit: tell Sema the compound statement has finished.
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }
private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active; // When false, the destructor is a no-op (see disable()).
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  /// Cancel the pop, e.g. when ownership of the scope is transferred.
  void disable() { Active = false; }
};
  /// Statement-building callbacks invoked by the parser (SemaStmt.cpp).
  /// ActOn* entry points take parsed pieces; Build* variants construct the
  /// AST node from already-formed components.
  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
                           SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);
  /// Switch labels: 'case' (optionally with a GNU 'case lo ... hi' range,
  /// marked by DotDotDotLoc) and 'default'.
  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);
  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);
  class ConditionResult;
  /// Control-flow statements: if/switch/while/do/for.
  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  /// Objective-C fast enumeration: 'for (x in collection)'.
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
  /// Controls how a C++ range-based for statement is built/checked.
  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
    BFRK_Check
  };
  /// C++11 range-based for statements (CoawaitLoc is set for
  /// 'for co_await' forms).
  StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *InitStmt,
                                  Stmt *LoopVar,
                                  SourceLocation ColonLoc, Expr *Collection,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *InitStmt,
                                  SourceLocation ColonLoc,
                                  Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                  Expr *Cond, Expr *Inc,
                                  Stmt *LoopVarDecl,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
  /// Jump statements: goto (direct and GNU computed goto), continue, break.
  StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
                           SourceLocation LabelLoc,
                           LabelDecl *TheDecl);
  StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                   SourceLocation StarLoc,
                                   Expr *DestExp);
  StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
  StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
  /// CapturedStmt regions: started/finished in pairs; the overload taking
  /// Params allows naming the captured record's fields, and
  /// OpenMPCaptureLevel associates the region with an OpenMP capture level.
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind, unsigned NumParams);
  typedef std::pair<StringRef, QualType> CapturedParamNameType;
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind,
                                ArrayRef<CapturedParamNameType> Params,
                                unsigned OpenMPCaptureLevel = 0);
  StmtResult ActOnCapturedRegionEnd(Stmt *S);
  void ActOnCapturedRegionError();
  RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                           SourceLocation Loc,
                                           unsigned NumParams);
  /// Bit-flag set controlling which entities are admitted by the copy
  /// elision candidate checks below (combine with bitwise OR).
  enum CopyElisionSemanticsKind {
    CES_Strict = 0,
    CES_AllowParameters = 1,
    CES_AllowDifferentTypes = 2,
    CES_AllowExceptionVariables = 4,
    CES_FormerDefault = (CES_AllowParameters),
    CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
    CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                         CES_AllowExceptionVariables),
  };
  VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                   CopyElisionSemanticsKind CESK);
  bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                              CopyElisionSemanticsKind CESK);
  /// 'return' statements; ActOnCapScopeReturnStmt handles returns from a
  /// capturing scope (block/lambda/captured region).
  StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             Scope *CurScope);
  StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
  StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
  /// GCC-style inline assembly ('asm' with constraint strings).
  StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                             bool IsVolatile, unsigned NumOutputs,
                             unsigned NumInputs, IdentifierInfo **Names,
                             MultiExprArg Constraints, MultiExprArg Exprs,
                             Expr *AsmString, MultiExprArg Clobbers,
                             unsigned NumLabels,
                             SourceLocation RParenLoc);
  void FillInlineAsmIdentifierInfo(Expr *Res,
                                   llvm::InlineAsmIdentifierInfo &Info);
  ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       UnqualifiedId &Id,
                                       bool IsUnevaluatedContext);
  bool LookupInlineAsmField(StringRef Base, StringRef Member,
                            unsigned &Offset, SourceLocation AsmLoc);
  ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                         SourceLocation AsmLoc);
  /// Microsoft-style inline assembly ('__asm { ... }' blocks).
  StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                            ArrayRef<Token> AsmToks,
                            StringRef AsmString,
                            unsigned NumOutputs, unsigned NumInputs,
                            ArrayRef<StringRef> Constraints,
                            ArrayRef<StringRef> Clobbers,
                            ArrayRef<Expr*> Exprs,
                            SourceLocation EndLoc);
  LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                   SourceLocation Location,
                                   bool AlwaysCreate);
  /// Objective-C exception handling: @try/@catch/@finally/@throw,
  /// @synchronized, and @autoreleasepool.
  VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
                                  SourceLocation StartLoc,
                                  SourceLocation IdLoc, IdentifierInfo *Id,
                                  bool Invalid = false);
  Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
  StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
                                  Decl *Parm, Stmt *Body);
  StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
  StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                                MultiStmtArg Catch, Stmt *Finally);
  StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
  StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                  Scope *CurScope);
  ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                            Expr *operand);
  StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                         Expr *SynchExpr,
                                         Stmt *SynchBody);
  StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
  /// C++ exception handling: try/catch blocks and their handler
  /// declarations.
  VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                     SourceLocation StartLoc,
                                     SourceLocation IdLoc,
                                     IdentifierInfo *Id);
  Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
  StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
                                Decl *ExDecl, Stmt *HandlerBlock);
  StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                              ArrayRef<Stmt *> Handlers);
  /// Structured exception handling: __try/__except/__finally/__leave.
  StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                              SourceLocation TryLoc, Stmt *TryBlock,
                              Stmt *Handler);
  StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
                                 Expr *FilterExpr,
                                 Stmt *Block);
  void ActOnStartSEHFinallyBlock();
  void ActOnAbortSEHFinallyBlock();
  StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
  StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
  void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
  /// Diagnostics for unused declarations/results and suspicious implicit
  /// conversions.
  bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
  /// If it's a file scoped decl that must warn if not used, keep track
  /// of it.
  void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
  /// DiagnoseUnusedExprResult - If the statement passed in is an expression
  /// whose result is unused, warn.
  void DiagnoseUnusedExprResult(const Stmt *S);
  void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
  void DiagnoseUnusedDecl(const NamedDecl *ND);
  /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
  /// statement as a \p Body, and it is located on the same line.
  ///
  /// This helps prevent bugs due to typos, such as:
  /// if (condition);
  /// do_stuff();
  void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                             const Stmt *Body,
                             unsigned DiagID);
  /// Warn if a for/while loop statement \p S, which is followed by
  /// \p PossibleBody, has a suspicious null statement as a body.
  void DiagnoseEmptyLoopBody(const Stmt *S,
                             const Stmt *PossibleBody);
  /// Warn if a value is moved to itself.
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);
  /// Warn if we're implicitly casting from a _Nullable pointer type to a
  /// _Nonnull one.
  void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                           SourceLocation Loc);
  /// Warn when implicitly casting 0 to nullptr.
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
  /// Begin a parsed declaration: pushes \p pool onto the delayed-diagnostics
  /// stack so subsequent delayed diagnostics collect into it. Balance with
  /// PopParsingDeclaration().
  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
  typedef ProcessingContextState ParsingClassState;
  /// Enter a class being parsed: bumps ParsingClassDepth and pushes an
  /// undelayed-diagnostics state. Balance with PopParsingClass().
  ParsingClassState PushParsingClass() {
    ParsingClassDepth++;
    return DelayedDiagnostics.pushUndelayed();
  }
  /// Leave a class being parsed: decrements ParsingClassDepth and restores
  /// the undelayed-diagnostics state saved by PushParsingClass().
  void PopParsingClass(ParsingClassState state) {
    ParsingClassDepth--;
    DelayedDiagnostics.popUndelayed(state);
  }
  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
  /// Availability checking for uses of (possibly deprecated/unavailable)
  /// declarations.
  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);
  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);
  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
  void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.
  bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
  // NOTE(review): "ClassReciever" below is a typo for "ClassReceiver"
  // (parameter name only; does not affect callers).
  bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                         const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                         bool ObjCPropertyAccess = false,
                         bool AvoidPartialAvailabilityChecks = false,
                         ObjCInterfaceDecl *ClassReciever = nullptr);
  void NoteDeletedFunction(FunctionDecl *FD);
  void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);
  /// Expression evaluation context stack management: push/pop contexts
  /// (e.g. unevaluated, potentially-evaluated) around expression parsing.
  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  void PopExpressionEvaluationContext();
  void DiscardCleanupsInEvaluationContext();
  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
  ExprResult CheckUnevaluatedOperand(Expr *E);
  void CheckUnusedVolatileAssignment(Expr *E);
  ExprResult ActOnConstantExpression(ExprResult Res);
  // Functions for marking a declaration referenced. These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense). There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  //
  // MightBeOdrUse indicates whether the use could possibly be an odr-use, and
  // should usually be true. This only needs to be set to false if the lack of
  // odr-use cannot be determined from the current context (for instance,
  // because the name denotes a virtual function and was written without an
  // explicit nested-name-specifier).
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);
  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();
  /// How a variable is being captured (by a block or lambda).
  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };
  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely check whether
  /// the capture can occur without performing the capture itself or complaining
  /// if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                          SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                          QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);
  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());
  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);
  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);
  /// Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);
  /// Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);
  /// Similar, but diagnostic is only produced if all the specified statements
  /// are reachable.
  bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                           const PartialDiagnostic &PD);
  // Primary Expressions.
  SourceRange getExprRange(Expr *E) const;
  /// Form an expression from a (possibly qualified) identifier; CCC, when
  /// provided, enables typo-correction on lookup failure.
  ExprResult ActOnIdExpression(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
      CorrectionCandidateCallback *CCC = nullptr,
      bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
  void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                              TemplateArgumentListInfo &Buffer,
                              DeclarationNameInfo &NameInfo,
                              const TemplateArgumentListInfo *&TemplateArgs);
  bool
  DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                      CorrectionCandidateCallback &CCC,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                      ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
  /// Objective-C instance-variable lookup inside a method body.
  DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
                                    IdentifierInfo *II);
  ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
  ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                                IdentifierInfo *II,
                                bool AllowBuiltinCreation=false);
  ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        bool isAddressOfOperand,
                                        const TemplateArgumentListInfo *TemplateArgs);
  /// If \p D cannot be odr-used in the current expression evaluation context,
  /// return a reason explaining why. Otherwise, return NOUR_None.
  NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
  /// Build a reference to a declaration (three overloads varying in how the
  /// name and qualifier are supplied).
  DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                                SourceLocation Loc,
                                const CXXScopeSpec *SS = nullptr);
  DeclRefExpr *
  BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                   const DeclarationNameInfo &NameInfo,
                   const CXXScopeSpec *SS = nullptr,
                   NamedDecl *FoundD = nullptr,
                   SourceLocation TemplateKWLoc = SourceLocation(),
                   const TemplateArgumentListInfo *TemplateArgs = nullptr);
  DeclRefExpr *
  BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                   const DeclarationNameInfo &NameInfo,
                   NestedNameSpecifierLoc NNS,
                   NamedDecl *FoundD = nullptr,
                   SourceLocation TemplateKWLoc = SourceLocation(),
                   const TemplateArgumentListInfo *TemplateArgs = nullptr);
  /// Member/name expression builders.
  ExprResult
  BuildAnonymousStructUnionMemberReference(
      const CXXScopeSpec &SS,
      SourceLocation nameLoc,
      IndirectFieldDecl *indirectField,
      DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
      Expr *baseObjectExpr = nullptr,
      SourceLocation opLoc = SourceLocation());
  ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
                                             SourceLocation TemplateKWLoc,
                                             LookupResult &R,
                                             const TemplateArgumentListInfo *TemplateArgs,
                                             const Scope *S);
  ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     LookupResult &R,
                                     const TemplateArgumentListInfo *TemplateArgs,
                                     bool IsDefiniteInstance,
                                     const Scope *S);
  bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                  const LookupResult &R,
                                  bool HasTrailingLParen);
  ExprResult
  BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                    const DeclarationNameInfo &NameInfo,
                                    bool IsAddressOfOperand, const Scope *S,
                                    TypeSourceInfo **RecoveryTSI = nullptr);
  ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       const DeclarationNameInfo &NameInfo,
                                       const TemplateArgumentListInfo *TemplateArgs);
  ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
                                      LookupResult &R,
                                      bool NeedsADL,
                                      bool AcceptInvalidDecl = false);
  ExprResult BuildDeclarationNameExpr(
      const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
      NamedDecl *FoundD = nullptr,
      const TemplateArgumentListInfo *TemplateArgs = nullptr,
      bool AcceptInvalidDecl = false);
  /// User-defined literal operator call.
  ExprResult BuildLiteralOperatorCall(LookupResult &R,
                                      DeclarationNameInfo &SuffixInfo,
                                      ArrayRef<Expr *> Args,
                                      SourceLocation LitEndLoc,
                                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
  /// Literals and literal-like expressions.
  ExprResult BuildPredefinedExpr(SourceLocation Loc,
                                 PredefinedExpr::IdentKind IK);
  ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
  ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
  ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
  ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
  ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
                                       SourceLocation LParen,
                                       SourceLocation RParen, ParsedType Ty);
  ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
                                       SourceLocation LParen,
                                       SourceLocation RParen, Expr *E);
  bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
  ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
  ExprResult ActOnCharacterConstant(const Token &Tok,
                                    Scope *UDLScope = nullptr);
  ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
  ExprResult ActOnParenListExpr(SourceLocation L,
                                SourceLocation R,
                                MultiExprArg Val);
  /// ActOnStringLiteral - The specified tokens were lexed as pasted string
  /// fragments (e.g. "foo" "bar" L"baz").
  ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                                Scope *UDLScope = nullptr);
  /// C11 _Generic selection expressions.
  ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                       SourceLocation DefaultLoc,
                                       SourceLocation RParenLoc,
                                       Expr *ControllingExpr,
                                       ArrayRef<ParsedType> ArgTypes,
                                       ArrayRef<Expr *> ArgExprs);
  ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                        SourceLocation DefaultLoc,
                                        SourceLocation RParenLoc,
                                        Expr *ControllingExpr,
                                        ArrayRef<TypeSourceInfo *> Types,
                                        ArrayRef<Expr *> Exprs);
  // Binary/Unary Operators.  'Tok' is the token for the operator.
  ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                  Expr *InputExpr);
  ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
                          UnaryOperatorKind Opc, Expr *Input);
  ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
                          tok::TokenKind Op, Expr *Input);
  bool isQualifiedMemberAccess(Expr *E);
  QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
  /// sizeof/alignof/vec_step-style trait expressions on a type or an
  /// expression operand.
  ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                            SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind,
                                            SourceRange R);
  ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind);
  ExprResult
  ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                UnaryExprOrTypeTrait ExprKind,
                                bool IsType, void *TyOrEx,
                                SourceRange ArgRange);
  ExprResult CheckPlaceholderExpr(Expr *E);
  bool CheckVecStepExpr(Expr *E);
  bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
  bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
                                        SourceRange ExprRange,
                                        UnaryExprOrTypeTrait ExprKind);
  ExprResult ActOnSizeofParameterPackExpr(Scope *S,
                                          SourceLocation OpLoc,
                                          IdentifierInfo &Name,
                                          SourceLocation NameLoc,
                                          SourceLocation RParenLoc);
  ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                                 tok::TokenKind Kind, Expr *Input);
  /// Subscript expressions, including matrix subscripts and OpenMP array
  /// sections/shaping.
  ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                     Expr *Idx, SourceLocation RLoc);
  ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                             Expr *Idx, SourceLocation RLoc);
  ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
                                              Expr *ColumnIdx,
                                              SourceLocation RBLoc);
  ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
                                      Expr *LowerBound, SourceLocation ColonLoc,
                                      Expr *Length, SourceLocation RBLoc);
  ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
                                      SourceLocation RParenLoc,
                                      ArrayRef<Expr *> Dims,
                                      ArrayRef<SourceRange> Brackets);
  /// Data structure for iterator expression.
  struct OMPIteratorData {
    IdentifierInfo *DeclIdent = nullptr;
    SourceLocation DeclIdentLoc;
    ParsedType Type;
    OMPIteratorExpr::IteratorRange Range;
    SourceLocation AssignLoc;
    SourceLocation ColonLoc;
    SourceLocation SecColonLoc;
  };
  ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
                                  SourceLocation LLoc, SourceLocation RLoc,
                                  ArrayRef<OMPIteratorData> Data);
  // This struct is for use by ActOnMemberAccess to allow
  // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
  // changing the access operator from a '.' to a '->' (to see if that is the
  // change needed to fix an error about an unknown member, e.g. when the class
  // defines a custom operator->).
  struct ActOnMemberAccessExtraArgs {
    Scope *S;
    UnqualifiedId &Id;
    Decl *ObjCImpDecl;
  };
  /// Member access expressions ('.' and '->').
  ExprResult BuildMemberReferenceExpr(
      Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
      CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
      const TemplateArgumentListInfo *TemplateArgs,
      const Scope *S,
      ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
  ExprResult
  BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                           bool IsArrow, const CXXScopeSpec &SS,
                           SourceLocation TemplateKWLoc,
                           NamedDecl *FirstQualifierInScope, LookupResult &R,
                           const TemplateArgumentListInfo *TemplateArgs,
                           const Scope *S,
                           bool SuppressQualifierCheck = false,
                           ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
  ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                     SourceLocation OpLoc,
                                     const CXXScopeSpec &SS, FieldDecl *Field,
                                     DeclAccessPair FoundDecl,
                                     const DeclarationNameInfo &MemberNameInfo);
  ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
  bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                     const CXXScopeSpec &SS,
                                     const LookupResult &R);
  ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
                                      bool IsArrow, SourceLocation OpLoc,
                                      const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      NamedDecl *FirstQualifierInScope,
                                      const DeclarationNameInfo &NameInfo,
                                      const TemplateArgumentListInfo *TemplateArgs);
  ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
                                   SourceLocation OpLoc,
                                   tok::TokenKind OpKind,
                                   CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   UnqualifiedId &Member,
                                   Decl *ObjCImpDecl);
  MemberExpr *
  BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                  const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
                  ValueDecl *Member, DeclAccessPair FoundDecl,
                  bool HadMultipleCandidates,
                  const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                  ExprValueKind VK, ExprObjectKind OK,
                  const TemplateArgumentListInfo *TemplateArgs = nullptr);
  MemberExpr *
  BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                  NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
                  ValueDecl *Member, DeclAccessPair FoundDecl,
                  bool HadMultipleCandidates,
                  const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                  ExprValueKind VK, ExprObjectKind OK,
                  const TemplateArgumentListInfo *TemplateArgs = nullptr);
  void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
  /// Call expressions and argument checking.
  bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
                               FunctionDecl *FDecl,
                               const FunctionProtoType *Proto,
                               ArrayRef<Expr *> Args,
                               SourceLocation RParenLoc,
                               bool ExecConfig = false);
  void CheckStaticArrayArgument(SourceLocation CallLoc,
                                ParmVarDecl *Param,
                                const Expr *ArgExpr);
  /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
  /// This provides the location of the left/right parens and a list of comma
  /// locations.
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  /// Whether BuildAtomicExpr receives arguments in the caller-facing (API)
  /// order or in the internal AST order.
  enum class AtomicArgumentOrder { API, AST };
  ExprResult
  BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                  SourceLocation RParenLoc, MultiExprArg Args,
                  AtomicExpr::AtomicOp Op,
                  AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
  ExprResult
  BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                        ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                        Expr *Config = nullptr, bool IsExecConfig = false,
                        ADLCallKind UsesADL = ADLCallKind::NotADL);
  /// CUDA kernel-launch execution configuration ('<<<...>>>').
  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);
  /// C-style casts and compound literals.
  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc,
                                 Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);
  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);
  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);
  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);
  /// Braced initializer lists and designated initializers.
  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);
  ExprResult BuildInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);
  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax,
                                        ExprResult Init);
private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
  /// Binary operators and the conditional operator.
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of a the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);
  /// GNU statement expressions: '({ ... })'.
  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc);
  ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc, unsigned TemplateDepth);
  // Handle the final expression in a statement expression.
  ExprResult ActOnStmtExprResult(ExprResult E);
  void ActOnStmtExprError();
  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets;  // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };
  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);
  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                             Expr *CondExpr, Expr *LHSExpr,
                             Expr *RHSExpr, SourceLocation RPLoc);
  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);
  // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
  // __builtin_COLUMN()
  ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
                                SourceLocation BuiltinLoc,
                                SourceLocation RPLoc);
  // Build a potentially resolved SourceLocExpr.
  ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
                                SourceLocation BuiltinLoc, SourceLocation RPLoc,
                                DeclContext *ParentContext);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Describes why a comparison category type (C++20 operator<=> support)
/// was needed; passed to CheckComparisonCategoryType to tailor lookup
/// and diagnostics.
enum class ComparisonCategoryUsage {
  /// The '<=>' operator was used in an expression and a builtin operator
  /// was selected.
  OperatorInExpression,
  /// A defaulted 'operator<=>' needed the comparison category. This
  /// typically only applies to 'std::strong_ordering', due to the implicit
  /// fallback return value.
  DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions, by merging in the
/// effective specification of every function the member would call.
class ImplicitExceptionSpecification {
  // Pointer rather than reference so the object stays copyable.
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  // Deduplicates the types accumulated in Exceptions.
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  // Ordered list of exception types for a dynamic (throw(...)) spec.
  SmallVector<QualType, 4> Exceptions;
  // Discard all collected exception types (used when the computed
  // specification no longer carries an explicit type list).
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }
public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Pre-C++11 there is no noexcept; start from throw() instead.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }
  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }
  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }
  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }
  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }
  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);
  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      // C++11 [except.spec]p14:
      //   The exception-specification is noexcept(false) if the set of
      //   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse the 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Saved value of Sema::CXXThisTypeOverride; presumably restored by the
  // destructor (ctor/dtor are defined out of line — confirm there).
  QualType OldCXXThisTypeOverride;
  // Whether this scope actually installed an override (see the Enabled
  // constructor parameter).
  bool Enabled;
public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);
  ~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
/// The scope in which to find allocation functions; passed to
/// FindAllocationFunctions to control where operator new / operator delete
/// are looked up for a new-expression.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, using the expression's own source location as
/// the point of completion. Convenience overload that delegates to
/// ActOnFinishFullExpr(Expr*, SourceLocation, bool); a null expression is
/// forwarded with an invalid SourceLocation.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  return ActOnFinishFullExpr(
      Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
/// Keeps information about an identifier in a nested-name-spec.
///
/// Bundles the identifier, its location, the location of the following
/// '::', and (for member-access expressions) the object type, so the
/// ActOnCXXNestedNameSpecifier family can take one argument instead of four.
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;
  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;
  /// The location of the identifier.
  SourceLocation IdentifierLoc;
  /// The location of the '::'.
  SourceLocation CCLoc;
  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }
  /// Creates an info object when the object type is already known as a
  /// QualType, wrapping it into a ParsedType.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Copy-init is the only initialization kind that is not direct-init.
  bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  // No pack expansions here: pass None for the expansion count.
  QualType CaptureTy = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id, DirectInit, Init);
  return ParsedType::make(CaptureTy);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Outcome of a C++ access-control check.
enum AccessResult {
AR_accessible,   ///< The entity is accessible.
AR_inaccessible, ///< The entity is not accessible.
AR_dependent,    ///< The check is dependent (cannot be resolved yet).
AR_delayed       ///< The check has been delayed for later processing.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload of the preceding function: runs the same
/// accessibility-for-deletion check with an invalid source location and a
/// default-constructed PartialDiagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
// Delegate to the full overload declared immediately above.
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Describes the context in which an abstract class type was used, for
/// diagnostic selection (presumably indexes a %select in the corresponding
/// diagnostic text — confirm against the diagnostic .td definitions).
enum AbstractDiagSelID {
AbstractNone = -1, ///< No specific context to report.
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Convenience wrapper around the TypeDiagnoser-taking overload above:
/// binds \p DiagID and the extra arguments into a BoundTypeDiagnoser and
/// forwards the check.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Tag type selecting the "template name is unconditionally required"
/// constructor of RequiredTemplateKind below.
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
/// Leaves TemplateKW as None (no value), which isRequired() below treats
/// as "required".
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
/// Returns the location of the 'template' keyword, or an invalid
/// location when there is none (including the unconditional case).
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
/// True when a template name is required. Note the subtlety: for
/// llvm::Optional, None != SourceLocation() is true, so this returns
/// true both when the keyword location is valid and in the
/// unconditional (None) case; it is false only for an engaged
/// invalid-location value (the default constructor).
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression,
UPPC_Block,
/// A type constraint,
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// The deduced arguments did not satisfy the constraints associated
  /// with the template.
  TDK_ConstraintsNotSatisfied,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
        OriginalArgType(OriginalArgType) {}

  /// The parameter type used for this deduction, before adjustments.
  QualType OriginalParamType;
  /// NOTE(review): presumably whether the parameter was decomposed from an
  /// initializer-list parameter during deduction -- confirm against callers.
  bool DecomposedParam;
  /// Index of the call argument this record corresponds to.
  unsigned ArgIdx;
  /// The type of the call argument as written.
  QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
  /// Deduction of the 'auto' type succeeded.
  DAR_Succeeded,
  /// Deduction failed.
  DAR_Failed,
  /// Deduction failed, and a diagnostic has already been emitted.
  DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark the template parameters deduced by the given function template,
/// delegating to the static overload with this Sema's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing.
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are instantiating a requirement of a requires expression.
    RequirementInstantiation,

    /// We are checking the satisfaction of a nested requirement of a requires
    /// expression.
    NestedRequirementConstraintsCheck,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are declaring an implicit 'operator==' for a defaulted
    /// 'operator<=>'.
    DeclaringImplicitEqualityComparison,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    // We are checking the constraints associated with a constrained entity or
    // the constraint expression of a concept. This includes the checks that
    // atomic constraints have the type 'bool' and that they can be constant
    // evaluated.
    ConstraintsCheck,

    // We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    // We are normalizing a constraint expression.
    ConstraintNormalization,

    // We are substituting into the parameter mapping of an atomic constraint
    // during normalization.
    ParameterMappingSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  /// Valid only when Kind is not DeclaringSpecialMember (the union holds
  /// SpecialMember in that case, not NumTemplateArgs).
  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that causes
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  /// The substitution index that was in effect when this object was
  /// constructed; restored by the destructor.
  int SavedIndex;

public:
  /// Install \p NewSubstitutionIndex as the current pack substitution
  /// index, remembering the previous value.
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), SavedIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  /// Restore the index that was active before construction.
  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = SavedIndex;
  }
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Private constructor taking the synthesis kind directly.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: destruction pops the instantiation off the stack, so a
  // copy would pop twice.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation:
/// true when at least one entry on the code-synthesis stack is a real
/// instantiation rather than some other form of synthesis.
bool inTemplateInstantiation() const {
  return NonInstantiationEntries < CodeSynthesisContexts.size();
}
/// Print the current synthesis-context stack (once per depth, to avoid
/// repeating it for multiple diagnostics at the same depth), followed by
/// the pragma-attribute instantiation point if one is active.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = Depth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  /// SFINAE error count at construction; compared in hasErrorOccurred()
  /// and restored on destruction.
  unsigned PrevSFINAEErrors;
  /// Saved Sema/diagnostics state that the trap modifies; all of it is
  /// restored by the destructor so trapped errors leave no trace.
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored())
  {
    // If we are not already inside a SFINAE context, mark this as a
    // non-instantiation SFINAE context for the trap's lifetime.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
///
/// Implemented as a SFINAETrap (to suppress and later restore diagnostic
/// state) plus a save/restore of Sema::DisableTypoCorrection, which is
/// forced on for the lifetime of the scope.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
// Saved value of SemaRef.DisableTypoCorrection, restored on destruction.
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, isolates global eager instantiation:
/// on entry it swaps out the pending-instantiation queue and the pending
/// vtable uses; on exit it swaps them back. Call perform() before
/// destruction to define used vtables and run the instantiations
/// gathered while the scope was active — the destructor asserts both
/// queues were drained.
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
// Stash the outer queues; work queued inside this scope accumulates
// in fresh, empty queues.
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
// Define used vtables and perform the instantiations queued while this
// scope was active (no-op when the scope is disabled).
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
// Queues saved at construction and restored at destruction.
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that isolates local implicit instantiations: it swaps out
/// Sema::PendingLocalImplicitInstantiations on entry and swaps it back on
/// exit. Call perform() before destruction to run the local
/// instantiations queued while the scope was active — the destructor
/// asserts the queue was drained.
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
// Perform only the local pending instantiations gathered in this scope.
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
// Restore the outer queue saved at construction.
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
// Outer queue stashed here while the scope is active.
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index,
///
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation is deferred,
/// together with the scope and declaration needed to instantiate it later
/// (collected via InstantiateAttrs' LateAttrs parameter).
struct LateInstantiatedAttribute {
// The attribute from the template pattern still to be instantiated.
const Attr *TmplAttr;
// Local instantiation scope captured for the deferred instantiation.
LocalInstantiationScope *Scope;
// The instantiated declaration the attribute will be attached to.
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container context currently being processed;
/// OCK_None indicates we are not inside any Objective-C container.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build a an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Families of Objective-C methods with special semantics
/// (alloc/new/copy/init variants), or OSMK_None for ordinary methods.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Per-argument information for an Objective-C method declaration as
/// parsed; consumed by ActOnMethodDeclaration.
struct ObjCArgInfo {
// The argument's name and where it appeared.
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
/// The result type is compatible.
RTC_Compatible,
/// The result type is not compatible.
RTC_Incompatible,
/// Compatibility could not be determined.
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// The reason a non-default '#pragma pack' state is being diagnosed
/// (see DiagnoseNonDefaultPragmaPack).
enum class PragmaPackDiagnoseKind {
// The pack state was not the default when a file was entered.
NonDefaultStateAtInclude,
// The pack state changed between entering and leaving a file.
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// The kind of section targeted by a bss_seg/data_seg/const_seg/code_seg
/// pragma (see ActOnPragmaMSSeg and UnifySection).
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
///
/// Precise semantics hold only when no value-unsafe floating-point
/// relaxation is permitted: no reassociation, no discarding of signed
/// zeros, no reciprocal substitution, and no approximate functions.
bool isPreciseFPEnabled() {
  // De Morgan form of "all relaxations disabled": precise iff none of
  // the value-unsafe options is allowed.
  return !(CurFPFeatures.allowAssociativeMath() ||
           CurFPFeatures.noSignedZeros() ||
           CurFPFeatures.allowReciprocalMath() ||
           CurFPFeatures.allowApproximateFunctions());
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Get the name of the OpenCL extension currently being processed, if any
/// (empty when no extension is active).
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Set the name of the current OpenCL extension. Used together with
/// setCurrentOpenCLExtensionForType/setCurrentOpenCLExtensionForDecl to
/// associate subsequently seen types and declarations with this extension.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
/// Construct a scope for the context selector \p TI (defined out of line).
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Return true if we are currently inside an `omp begin/end declare
/// variant` scope (i.e., the scope stack is non-empty).
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '\#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside an OpenMP declare target region, i.e. when at least
/// one '#pragma omp declare target' directive is currently open
/// (DeclareTargetNestingLevel > 0).
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for the list of allocators in a 'uses_allocators' clause:
/// one entry per allocator named in the clause.
struct UsesAllocatorsData {
/// Allocator expression.
Expr *Allocator = nullptr;
/// Allocator traits expression (null when no traits were given).
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true if \p CCK corresponds to a cast written in the source
/// (C-style, functional, or other cast notation), as opposed to an
/// implicit conversion or a builtin-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload: unwraps the ExprResults, delegates to the
/// Expr*& overload above, and writes any rewritten expressions back
/// into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  // The Expr*& overload may have rewritten either operand; propagate.
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
/// Returned by CompareReferenceRelationship below.
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible - The two types are reference-compatible.
  Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
  /// The conversions that would be performed on an lvalue of type T2 when
  /// binding a reference of type T1 to it, as determined when evaluating
  /// whether T1 is reference-compatible with T2.
  ///
  /// These are bitmask flags (see LLVM_MARK_AS_BITMASK_ENUM below) and may
  /// be combined; CompareReferenceRelationship reports them through its
  /// optional out-parameter.
  enum ReferenceConversions {
    Qualification = 0x1,
    NestedQualification = 0x2,
    Function = 0x4,
    DerivedToBase = 0x8,
    ObjC = 0x10,
    ObjCLifetime = 0x20,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
  };
};
/// Shorthand for the nested bitmask enumeration above.
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Result of an ARC retainable-pointer conversion check; produced by
/// CheckObjCConversion below.
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of analyzing a condition expression (e.g. the controlling
/// expression of an if/while/for/switch), bundling an optional condition
/// variable with the converted expression.
class ConditionResult {
  Decl *ConditionVar;    // Condition variable, if one was declared; else null.
  FullExprArg Condition; // The condition expression itself.
  bool Invalid;          // True if the condition could not be formed.
  bool HasKnownValue;    // True if the value was evaluated (constexpr case).
  bool KnownValue;       // The evaluated boolean value, if HasKnownValue.
  friend class Sema;

  // Builds a valid result. Eagerly evaluates the condition only when
  // IsConstexpr is set and the expression is not value-dependent.
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}

  // Builds a result with no expression attached; Invalid selects whether it
  // represents an error.
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }

  /// Returns the condition variable (may be null) and the condition
  /// expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }

  /// Returns the known boolean value of the condition, or None if it was
  /// not evaluated as a constant.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
/// Produce a ConditionResult representing an invalid condition.
static ConditionResult ConditionError() {
  return ConditionResult(/*Invalid=*/true);
}
/// The kind of condition an expression is being analyzed as; consumed by
/// ActOnCondition and the Check*Condition helpers below.
enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  // NOTE(review): presumably suppresses the ICE diagnostics when set;
  // confirm against call sites.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Diagnose that the expression is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Diagnose the folding case (see the AllowFold parameter of
  /// VerifyIntegerConstantExpression); has a default implementation.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD; // Canonical declaration of the function.
  SourceLocation Loc;                // Associated location (e.g. of the call).
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  // Streams Value into whichever diagnostic is active: the immediate one, or
  // the deferred partial diagnostic recorded in Sema::DeviceDeferredDiags.
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;            // Sema instance that owns the deferred-diag map.
  SourceLocation Loc; // Location the diagnostic points at.
  unsigned DiagID;    // ID of the diagnostic to emit.
  FunctionDecl *Fn;   // Function a deferred diagnostic attaches to.
  bool ShowCallStack; // Whether to emit the known-emitted call stack.
  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
/// Classification of a function's CUDA execution space; computed by
/// IdentifyCUDATarget below.
enum CUDAFunctionTarget {
  CFT_Device,        ///< Device-side (__device__) function.
  CFT_Global,        ///< Kernel (__global__) function.
  CFT_Host,          ///< Host-side function (also the result for null decls).
  CFT_HostDevice,    ///< Callable from both host and device.
  CFT_InvalidTarget  ///< Inconsistent/invalid combination of attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
///
/// Classifies CurContext via IdentifyCUDATarget; non-function contexts
/// yield a null FunctionDecl, for which IdentifyCUDATarget returns CFT_Host.
CUDAFunctionTarget CurrentCUDATarget() {
  FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
  return IdentifyCUDATarget(CurFn);
}
static bool IsCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best. Computed by IdentifyCUDAPreference below.
enum CUDAFunctionPreference {
  CFP_Never,      // Invalid caller/callee combination.
  CFP_WrongSide,  // Calls from host-device to host or device
                  // function that do not match current compilation
                  // mode.
  CFP_HostDevice, // Any calls to host/device functions.
  CFP_SameSide,   // Calls from host-device to host or device
                  // function matching current compilation mode.
  CFP_Native,     // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
/// (Passed to CodeCompleteOrdinaryName to select the candidate set.)
enum ParserCompletionContext {
  /// Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// Code completion occurs within an expression.
  PCC_Expression,
  /// Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// Code completion occurs where only a type is permitted.
  PCC_Type,
  /// Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for devive yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
///
/// On construction (unless suppressed) this pushes a new evaluation context
/// onto the Sema context stack; the destructor pops it again, so the context
/// is scoped to the lifetime of this object.
class EnterExpressionEvaluationContext {
Sema &Actions;
// Whether a context was actually pushed (and therefore must be popped).
bool Entered = true;
public:
// Push NewContext, optionally associated with a lambda context
// declaration. Pass ShouldEnter = false to make this a no-op RAII shell.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
// Variant that reuses the enclosing lambda context declaration instead of
// supplying a new one (tag-dispatched via ReuseLambdaContextDecl_t).
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
// Variant for braced-init-lists: only enters a context when needed (see
// the comment below); otherwise leaves the current context untouched.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// The cached token stream of the template body, replayed when the
/// definition is finally parsed.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Sentinel keys delegate to the FunctionDecl base info; the paired
// SourceLocation is default-constructed (invalid).
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Combine the declaration's hash with the raw location encoding so the
// same function at different locations hashes differently.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
// Equal only when both the declaration and the location match.
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
2013.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
/* Normalization constant used by the kernel when dividing sums. */
*float_n = 1.2;
/* Deterministic fill: data[i][j] = (i*j)/M, so runs are reproducible. */
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
/* Insert a newline every 20 printed values to keep lines short. */
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.

   Computes the m x m correlation matrix of the n x m data matrix:
   per-column means and standard deviations, centering/reduction of the
   columns, then pairwise column dot products into symmat. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* BUGFIX(review): the original wrapped these loops in
   "#pragma omp parallel private(i, j, j2) num_threads(#P11) #same issue as atax",
   which is not valid C (bare '#' tokens inside a pragma) and nested further
   parallel regions inside it; the inner pragmas also placed "simd" after
   the schedule clause, which is invalid clause order. Each loop now carries
   a single well-formed parallel-for pragma with explicit private clauses. */
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel for private(i) schedule(dynamic, 4) num_threads(4)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
#pragma omp parallel for private(i) schedule(dynamic, 4) num_threads(4)
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following in an inelegant but usual way to handle
   near-zero std. dev. values, which below would cause a zero-
   divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
#pragma omp parallel for private(j) schedule(dynamic, 4) num_threads(4)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
/* Calculate the m * m correlation matrix. Each j1 iteration writes row j1
   and the mirrored entries symmat[j2][j1] with j2 > j1 (column j1 below
   the diagonal), so iterations touch disjoint elements and can safely run
   in parallel. */
#pragma omp parallel for private(i, j2) schedule(dynamic, 4) num_threads(4)
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
#pragma endscop
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Driver: allocates the Polybench arrays, initializes them, times the
   correlation kernel, and prints symmat to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
omp.c | // note not doing O0 below as to ensure we get tbaa
// TODO: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - && ./a.out
// RUN: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - && ./a.out
// RUN: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - && ./a.out
// RUN: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - && ./a.out
// note not doing O0 below as to ensure we get tbaa
// TODO: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - && ./a.out
// RUN: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - && ./a.out
// RUN: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - && ./a.out
// RUN: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - && ./a.out
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "test_utils.h"
double __enzyme_autodiff(void*, ...);
/*
void omp(float& a, int N) {
#define N 20
#pragma omp parallel for
for (int i=0; i<N; i++) {
//a[i] *= a[i];
(&a)[i] *= (&a)[i];
}
#undef N
(&a)[0] = 0;
}
*/
/* Squares each of the N elements of a in place (parallelized across the
 * loop iterations), then zeroes the first element. */
void omp(float* a, int N) {
#pragma omp parallel for
    for (int idx = 0; idx < N; ++idx) {
        float v = a[idx];
        a[idx] = v * v;
    }
    a[0] = 0;
}
/* Drives the Enzyme reverse-mode test: seeds the shadow array d_a with
 * ones, differentiates omp(), and checks the gradient is 2*(i+1) for each
 * element except index 0, which omp() clobbers (so its gradient is 0). */
int main(int argc, char** argv) {
    int N = 20;
    float a[N];
    float d_a[N];
    for (int k = 0; k < N; ++k) {
        a[k] = k + 1;
        d_a[k] = 1.0f;
    }
    printf("ran omp\n");
    __enzyme_autodiff((void*)omp, a, d_a, N);
    for (int k = 0; k < N; ++k) {
        printf("a[%d]=%f d_a[%d]=%f\n", k, a[k], k, d_a[k]);
    }
    APPROX_EQ(d_a[0], 0.0f, 1e-10);
    for (int k = 1; k < N; ++k) {
        APPROX_EQ(d_a[k], 2.0f * (k + 1), 1e-10);
    }
    return 0;
}
|
openmptest.c | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include</usr/include/stdint.h>
#include<stdbool.h>
#include "db.h"
int listSize;
uint32_t maxValue;
double seq;
int output = 0;
uint32_t* list;
/* Result record for a single read. NOTE(review): this struct appears
 * unused within this file -- presumably consumed by code in db.h or a
 * sibling translation unit; confirm before removing. */
struct cgmResult{
int read[3]; // 16 bases in each int
uint32_t* matches; // pointer to list of match locations
int length; // length of match list
};
/* Stub database handle; db_query below ignores it and serves the global
 * list instead. */
struct db{
int x;
};
/* Removes consecutive duplicates from a sorted list in place and returns
 * the deduplicated length. The prefix list[0..j-1] holds the result.
 *
 * BUGFIX: the original advanced i an extra step inside the duplicate
 * branch, which silently dropped the element following every duplicate
 * (e.g. {1,1,2} deduplicated to {1} instead of {1,2}). */
int remove_dups(uint32_t* list, int length)
{
    int i, j = 0;
    for (i = 0; i < length; i++)
    {
        /* Skip elements equal to their predecessor; copy everything else. */
        if (i > 0 && list[i] == list[i-1])
            continue;
        list[j] = list[i];
        j++;
    }
    return j;
}
/* qsort comparator for uint32_t values; returns <0, 0, or >0.
 *
 * BUGFIX: the original cast both pointers to const int* and returned
 * *a - *b. Reading a uint32_t above INT_MAX through an int* flips its
 * sign, and the subtraction can overflow/wrap, so large values sorted
 * before small ones. Compare directly instead. */
int uint32_t_cmp(const void *a, const void *b)
{
    const uint32_t* cp_a = (const uint32_t*) a;
    const uint32_t* cp_b = (const uint32_t*) b;
    if (*cp_a < *cp_b) return -1;
    if (*cp_a > *cp_b) return 1;
    return 0;
}
uint32_t dbtable[3][5];
int num = 0;
/* Stub database lookup: ignores d and key and hands back the global
 * pre-generated list (and its length) for every query. *values aliases
 * the global -- callers must not free it. */
int32_t db_query (struct db *d, uint32_t key, uint32_t **values)
{
*values = list;
return listSize;
}
/* Three-way merge of the sorted lists a, b and c into a freshly malloc'd
 * buffer returned via *result; the caller owns the buffer. Ties prefer a,
 * then b. Returns the combined length. Exits the process on OOM. */
int mergeLists(uint32_t* a, uint32_t* b, uint32_t* c, int aLength, int bLength, int cLength, uint32_t** result)
{
    int total = aLength + bLength + cLength;
    uint32_t* out = (uint32_t*) malloc(sizeof(uint32_t) * total);
    if (out == NULL) {
        printf("Unable to allocate memory!\n");
        exit(-1);
    }
    int ai = 0, bi = 0, ci = 0;
    for (int pos = 0; pos < total; pos++) {
        /* Take from a when it is the (tied-)smallest head; otherwise from
         * b when it beats c; otherwise from c. */
        int takeA = ai < aLength && (bi >= bLength || a[ai] <= b[bi])
                                 && (ci >= cLength || a[ai] <= c[ci]);
        if (takeA) {
            out[pos] = a[ai++];
        } else if (bi < bLength && (ci >= cLength || b[bi] <= c[ci])) {
            out[pos] = b[bi++];
        } else {
            out[pos] = c[ci++];
        }
    }
    *result = out;
    return total;
}
/* Scans the sorted position lists a and b for pairs where
 * b[j] == a[i] + secLength + gap, i.e. a hit in b immediately following a
 * hit in a (one section apart plus an optional gap). On success *matches
 * receives a malloc'd list of the adjusted start positions
 * (a[i] - startOffset) -- caller owns the buffer -- and the count is
 * returned. With no matches (or an empty input list) *matches is set to
 * NULL and 0 is returned. Exits the process on OOM. */
int doubleMatch(uint32_t* a, uint32_t* b, int aLength, int bLength, uint32_t secLength, uint32_t** matches, uint32_t gap, uint32_t startOffset)
{
    int i = 0, j = 0, mLength = 0;
    uint32_t* dubs = NULL;
    if(aLength == 0 || bLength == 0)
    {
        /* BUGFIX: this was "*matches == NULL;" -- a no-op comparison that
         * left *matches untouched; assign NULL so callers see no result. */
        *matches = NULL;
        return 0;
    }
    /* maximum length is length of smaller list */
    int mMax = aLength;
    if(bLength < mMax)
        mMax = bLength;
    dubs = (uint32_t*) malloc(sizeof(uint32_t)*mMax);
    if(dubs == NULL){
        printf("Unable to allocate memory!\n");
        exit(-1);
    }
    /* loop through the items in the first list looking for matching items
     * in the second list; both lists are sorted, so j never moves back. */
    while(i < aLength && j < bLength){
        while(j < bLength){
            if(b[j] < a[i] + secLength + gap)
                j++;
            else if(b[j] > a[i] + secLength + gap)
                break;
            else{
                /* NOTE(review): matches at a[i] <= startOffset are dropped;
                 * presumably because the start would fall before position 0
                 * -- verify whether "<" was meant to be "<=". */
                if(startOffset < a[i]){
                    dubs[mLength] = a[i]-startOffset;
                    mLength++;
                }
                break;
            }
        }
        i++;
    }
    /* if results were found, return them, otherwise free the memory */
    if(mLength > 0){
        *matches = dubs;
        return mLength;
    }
    free(dubs);
    *matches = NULL;
    return 0;
}
/* Solves one read: queries the database for the three key sections,
 * intersects the position lists pairwise (doubleMatch), and prefers a
 * triple match, then any double matches, then the raw merged lists.
 * *matches receives a malloc'd result list owned by the caller; the
 * match count is returned.
 * NOTE(review): sections, secLength, t1, t2 and elapsedTime are declared
 * but never used here -- presumably leftovers from a timed variant. */
int cgm_solver(uint32_t a, uint32_t b, uint32_t c, uint32_t** matches, struct db* database)
{
int sections, secLength, aLength, bLength, cLength;
int double1, double2, double3;
int triple;
int count;
struct timeval t1, t2;
double elapsedTime;
uint32_t* dubMatches1 = NULL;
uint32_t* dubMatches2 = NULL;
uint32_t* dubMatches3 = NULL;
uint32_t* tripMatches = NULL;
uint32_t* temp;
int keySize = 16;
uint32_t* aList = NULL;
uint32_t* bList = NULL;
uint32_t* cList = NULL;
/* db_query returns views of shared storage; do not free these lists. */
aLength = db_query(database, a, &aList);
bLength = db_query(database, b, &bList);
cLength = db_query(database, c, &cList);
/* Pairwise adjacency: A-B, A-C (two sections apart), B-C (offset by one
 * section so reported positions line up with the read start). */
double1 = doubleMatch(aList, bList, aLength, bLength, keySize, &dubMatches1, 0, 0);
double2 = doubleMatch(aList, cList, aLength, cLength, keySize, &dubMatches2, keySize, 0);
double3 = doubleMatch(bList, cList, bLength, cLength, keySize, &dubMatches3, 0, keySize);
/* Positions present in both A-B and A-C form full triple matches. */
triple = doubleMatch(dubMatches1, dubMatches2, double1, double2, 0, &tripMatches, 0, 0);
if(triple > 0){
*matches = tripMatches;
count = triple;
}
else if(double1 + double2 + double3 > 0)
{
count = mergeLists(dubMatches1, dubMatches2, dubMatches3, double1, double2, double3, &temp);
*matches = temp;
}
else
{
count = mergeLists(aList, bList, cList, aLength, bLength, cLength, &temp);
*matches = temp;
}
/* free any allocated memory and return the number items in matches */
/* NOTE(review): correctness of the triple==0 free relies on doubleMatch
 * setting tripMatches to NULL when it finds nothing -- verify. */
free(dubMatches1);
free(dubMatches2);
free(dubMatches3);
if(triple == 0)
free(tripMatches);
if(output == 1){
/* Serialize diagnostic output across the OpenMP worker threads. */
#pragma omp critical
{
int i;
printf("AList: ");
for(i = 0; i < aLength; i++)
printf("%d ", aList[i]);
printf("\nBList (-16): ");
for(i = 0; i < bLength; i++)
printf("%d ", bList[i]-16);
printf("\nCList (-32): ");
for(i = 0; i < cLength; i++)
printf("%d ", cList[i]-32);
printf("\nMatches: ");
for(i = 0; i < count; i++)
printf("%d ", (*matches)[i]);
printf("\n\n***********************************\n\n");
}
}
return count;
}
/* Runs cgm_solver numReads times across OpenMP threads (dynamic schedule
 * with the given chunk size), times the whole batch with gettimeofday,
 * prints summary timings, and returns the loop counter.
 * NOTE(review): reads is unused -- the solver is called with dummy keys;
 * also i is int while numReads is uint32_t, a signed/unsigned comparison
 * that is fine for the sizes used here but worth confirming. */
int cgm(int** reads, uint32_t numReads, int chunkSize, struct db* database)
{
int i;
struct timeval t1, t2;
double overall = 0;
gettimeofday(&t1, NULL);
#pragma omp parallel for schedule(dynamic, chunkSize)
for(i = 0; i < numReads; i++)
{
uint32_t* matches = NULL;
cgm_solver(0, 0, 0, &matches, database);
free(matches);
}
// int j = fgmStart(results, i);
gettimeofday(&t2, NULL);
overall = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
overall += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
printf("\n*****Result*****:\n");
printf("Average Time (w/ generating lists and overhead): %f\n", overall/numReads);
printf("Total Time (w/ generating lists and overhead): %f\n", overall);
return i;
}
/* Entry point: parse CLI arguments, build a sorted de-duplicated random list,
 * then run the parallel matcher benchmark.
 *
 * BUG FIX: the original called srand() AFTER the list had already been
 * generated with rand(), so every run produced an identical list. The seed
 * is now set before any rand() call. Also: checked malloc, rejected
 * MAXVALUE==0 (rand() % 0 is undefined behavior), removed the unused
 * `results` local, and freed `list` before exit. */
int main(int argc, char* argv[])
{
    if (argc != 6 && argc != 7) {
        printf("USAGE: cgm MAXVALUE LISTSIZE THREADS CHUNKSIZE REPS [-o]\n-o is used to print list values to cout");
        exit(-1);
    }
    maxValue = (uint32_t) strtoul(argv[1], NULL, 10);
    listSize = atoi(argv[2]);
    int threads = atoi(argv[3]);
    int p = atoi(argv[4]);
    int reps = atoi(argv[5]);
    if (argc == 7 && argv[6][0] == '-' && argv[6][1] == 'o')
        output = 1;

    if (maxValue == 0) { /* rand() % 0 would be undefined behavior */
        printf("MAXVALUE must be greater than zero\n");
        exit(-1);
    }

    /* seed BEFORE drawing random values (was seeded after generation) */
    srand((unsigned)time(NULL));

    int j;
    list = (uint32_t*) malloc(sizeof(uint32_t)*listSize);
    if (list == NULL) {
        printf("ERROR: failed to allocate list of %d entries\n", listSize);
        exit(-1);
    }
    for (j = 0; j < listSize; j++)
        list[j] = rand() % maxValue;
    qsort(list, listSize, sizeof(uint32_t), uint32_t_cmp);
    listSize = remove_dups(list, listSize);

    omp_set_num_threads(threads);
    /* NOTE(review): mydb is passed uninitialised, as in the original;
     * confirm db_query() tolerates an uninitialised struct db. */
    struct db mydb;
    int** reads = NULL;
    cgm(reads, reps, p, &mydb);

    free(list);
    return 0;
}
|
TGV_core.c | /*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2017 Daniil Kazantsev
Copyright 2017 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "TGV_core.h"
/* C-OMP implementation of the Primal-Dual denoising method for the
 * Total Generalized Variation (TGV)-L2 model [1] (2D case only)
 *
 * Input Parameters:
 * 1. Noisy image (2D)
 * 2. lambda - regularisation parameter
 * 3. parameter to control the first-order term (alpha1)
 * 4. parameter to control the second-order term (alpha0)
 * 5. Number of Chambolle-Pock (Primal-Dual) iterations
 * 6. Lipschitz constant (default is 12)
 *
 * Output:
 * Filtered/regularised image
 *
 * References:
 * [1] K. Bredies "Total Generalized Variation"
 */
/* Primal-dual (Chambolle-Pock) solver for the TGV-L2 denoising model, 2D.
 *
 * U0     - noisy input image (dimX*dimY, row stride dimX)
 * U      - output buffer; initialised from U0 and updated in place
 * lambda - regularisation parameter
 * alpha1 - weight of the first-order term
 * alpha0 - weight of the second-order term
 * iter   - number of primal-dual iterations
 * L2     - Lipschitz constant (step sizes are L2^-0.5)
 *
 * Returns U[0] on success, 0.0f if a work buffer could not be allocated.
 *
 * FIX: all ten calloc() results are now checked before use (the original
 * dereferenced NULL on allocation failure), and the size product is
 * promoted to long before multiplying to avoid int overflow. */
float TGV_main(float *U0, float *U, float lambda, float alpha1, float alpha0, int iter, float L2, int dimX, int dimY)
{
    long DimTotal;
    int ll;
    float *U_old, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma;

    /* promote before multiplying so large images do not overflow int */
    DimTotal = (long)dimX*(long)dimY;

    /* dual variables (P, Q) and primal work buffers, zero-initialised */
    P1 = calloc(DimTotal, sizeof(float));
    P2 = calloc(DimTotal, sizeof(float));
    Q1 = calloc(DimTotal, sizeof(float));
    Q2 = calloc(DimTotal, sizeof(float));
    Q3 = calloc(DimTotal, sizeof(float));
    U_old = calloc(DimTotal, sizeof(float));
    V1 = calloc(DimTotal, sizeof(float));
    V1_old = calloc(DimTotal, sizeof(float));
    V2 = calloc(DimTotal, sizeof(float));
    V2_old = calloc(DimTotal, sizeof(float));

    /* fail cleanly on OOM; free(NULL) is a no-op so no per-pointer checks */
    if (!P1 || !P2 || !Q1 || !Q2 || !Q3 || !U_old || !V1 || !V1_old || !V2 || !V2_old) {
        free(P1); free(P2); free(Q1); free(Q2); free(Q3); free(U_old);
        free(V1); free(V2); free(V1_old); free(V2_old);
        return 0.0f;
    }

    copyIm(U0, U, (long)(dimX), (long)(dimY), 1l); /* initialize U with the noisy input */
    tau = pow(L2,-0.5);   /* primal step size */
    sigma = pow(L2,-0.5); /* dual step size */

    /* Primal-dual iterations begin here */
    for (ll = 0; ll < iter; ll++) {
        /* Calculate Dual Variable P */
        DualP_2D(U, V1, V2, P1, P2, (long)(dimX), (long)(dimY), sigma);
        /* Projection onto convex set for P */
        ProjP_2D(P1, P2, (long)(dimX), (long)(dimY), alpha1);
        /* Calculate Dual Variable Q */
        DualQ_2D(V1, V2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), sigma);
        /* Projection onto convex set for Q */
        ProjQ_2D(Q1, Q2, Q3, (long)(dimX), (long)(dimY), alpha0);
        /* save U into U_old before the primal step */
        copyIm(U, U_old, (long)(dimX), (long)(dimY), 1l);
        /* adjoint operation -> divergence and projection of P */
        DivProjP_2D(U, U0, P1, P2, (long)(dimX), (long)(dimY), lambda, tau);
        /* over-relaxation: U <- 2*U - U_old */
        newU(U, U_old, (long)(dimX), (long)(dimY));
        /* save V into V_old */
        copyIm(V1, V1_old, (long)(dimX), (long)(dimY), 1l);
        copyIm(V2, V2_old, (long)(dimX), (long)(dimY), 1l);
        /* update V */
        UpdV_2D(V1, V2, P1, P2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), tau);
        /* over-relaxation of V */
        newU(V1, V1_old, (long)(dimX), (long)(dimY));
        newU(V2, V2_old, (long)(dimX), (long)(dimY));
    } /* end of iterations */

    /* free work arrays */
    free(P1); free(P2); free(Q1); free(Q2); free(Q3); free(U_old);
    free(V1); free(V2); free(V1_old); free(V2_old);
    return *U;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/*Calculating dual variable P (using forward differences)*/
/* Dual variable P update: forward differences of U minus V, scaled by sigma.
 * Symmetric (Neumann) boundaries: on the last column/row the difference is
 * taken backwards, mirroring the image across the edge.
 * Index layout: element (i,j) lives at j*dimX + i. Always returns 1. */
float DualP_2D(float *U, float *V1, float *V2, float *P1, float *P2, long dimX, long dimY, float sigma)
{
    long i, j;
#pragma omp parallel for shared(U,V1,V2,P1,P2) private(i,j)
    for (i = 0; i < dimX; i++) {
        for (j = 0; j < dimY; j++) {
            const long idx = j*dimX + i;
            /* x-neighbour: forward step, mirrored at the right edge */
            const long xn = (i == dimX-1) ? (j*dimX + (i-1)) : (j*dimX + (i+1));
            /* y-neighbour: forward step, mirrored at the bottom edge */
            const long yn = (j == dimY-1) ? ((j-1)*dimX + i) : ((j+1)*dimX + i);
            P1[idx] += sigma*((U[xn] - U[idx]) - V1[idx]);
            P2[idx] += sigma*((U[yn] - U[idx]) - V2[idx]);
        }
    }
    return 1;
}
/*Projection onto convex set for P*/
float ProjP_2D(float *P1, float *P2, long dimX, long dimY, float alpha1)
{
float grad_magn;
long i,j,index;
#pragma omp parallel for shared(P1,P2) private(i,j,index,grad_magn)
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
index = j*dimX+i;
grad_magn = sqrt(pow(P1[index],2) + pow(P2[index],2));
grad_magn = grad_magn/alpha1;
if (grad_magn > 1.0) {
P1[index] /= grad_magn;
P2[index] /= grad_magn;
}
}}
return 1;
}
/*Calculating dual variable Q (using forward differences)*/
/* Dual variable Q update: forward differences of the vector field (V1,V2),
 * accumulated into Q scaled by sigma. Q3 receives the symmetrised mixed
 * term 0.5*(dV2/dx + dV1/dy). Symmetric (Neumann) boundaries mirror the
 * last column/row. Always returns 1. */
float DualQ_2D(float *V1, float *V2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float sigma)
{
    long i, j;
#pragma omp parallel for shared(Q1,Q2,Q3,V1,V2) private(i,j)
    for (i = 0; i < dimX; i++) {
        for (j = 0; j < dimY; j++) {
            const long idx = j*dimX + i;
            /* forward neighbours, mirrored at the far edges */
            const long xn = (i == dimX-1) ? (j*dimX + (i-1)) : (j*dimX + (i+1));
            const long yn = (j == dimY-1) ? ((j-1)*dimX + i) : ((j+1)*dimX + i);
            const float q1  = V1[xn] - V1[idx]; /* dV1/dx */
            const float q11 = V2[xn] - V2[idx]; /* dV2/dx */
            const float q2  = V2[yn] - V2[idx]; /* dV2/dy */
            const float q22 = V1[yn] - V1[idx]; /* dV1/dy */
            Q1[idx] += sigma*(q1);
            Q2[idx] += sigma*(q2);
            Q3[idx] += sigma*(0.5f*(q11 + q22));
        }
    }
    return 1;
}
float ProjQ_2D(float *Q1, float *Q2, float *Q3, long dimX, long dimY, float alpha0)
{
float grad_magn;
long i,j,index;
#pragma omp parallel for shared(Q1,Q2,Q3) private(i,j,index,grad_magn)
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
index = j*dimX+i;
grad_magn = sqrt(pow(Q1[index],2) + pow(Q2[index],2) + 2*pow(Q3[index],2));
grad_magn = grad_magn/alpha0;
if (grad_magn > 1.0) {
Q1[index] /= grad_magn;
Q2[index] /= grad_magn;
Q3[index] /= grad_magn;
}
}}
return 1;
}
/* Divergence and projection for P*/
/* Primal step: backward-difference divergence of (P1,P2) followed by the
 * proximal update of U against the data term U0:
 *   U <- (lambda*(U + tau*div P) + tau*U0) / (lambda + tau)
 * At the first column/row the divergence uses the value itself (adjoint of
 * the mirrored forward difference). Returns U[0]. */
float DivProjP_2D(float *U, float *U0, float *P1, float *P2, long dimX, long dimY, float lambda, float tau)
{
    long i, j;
#pragma omp parallel for shared(U,U0,P1,P2) private(i,j)
    for (i = 0; i < dimX; i++) {
        for (j = 0; j < dimY; j++) {
            const long idx = j*dimX + i;
            const float dx = (i == 0) ? P1[idx] : P1[idx] - P1[j*dimX + (i-1)];
            const float dy = (j == 0) ? P2[idx] : P2[idx] - P2[(j-1)*dimX + i];
            const float div = dx + dy;
            U[idx] = (lambda*(U[idx] + tau*div) + tau*U0[idx])/(lambda + tau);
        }
    }
    return *U;
}
/*get updated solution U*/
/* Over-relaxation step of Chambolle-Pock: U <- 2*U - U_old, elementwise
 * over the whole dimX*dimY image. Returns U[0]. */
float newU(float *U, float *U_old, long dimX, long dimY)
{
    const long total = dimX*dimY;
    long k;
#pragma omp parallel for shared(U,U_old) private(k)
    for (k = 0; k < total; k++)
        U[k] = 2*U[k] - U_old[k];
    return *U;
}
/*get update for V*/
/* Primal update of the vector field V: backward-difference divergence of
 * the tensor (Q1,Q3;Q3,Q2) added to P, scaled by tau:
 *   V1 += tau*(P1 + dQ1/dx + dQ3/dy)
 *   V2 += tau*(P2 + dQ2/dy + dQ3/dx)
 * First column/row uses the value itself (mirrored-boundary adjoint).
 * Always returns 1. */
float UpdV_2D(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float tau)
{
    long i, j;
#pragma omp parallel for shared(V1,V2,P1,P2,Q1,Q2,Q3) private(i,j)
    for (i = 0; i < dimX; i++) {
        for (j = 0; j < dimY; j++) {
            const long idx = j*dimX + i;
            const float q1  = (i == 0) ? Q1[idx] : Q1[idx] - Q1[j*dimX + (i-1)]; /* dQ1/dx */
            const float q11 = (i == 0) ? Q3[idx] : Q3[idx] - Q3[j*dimX + (i-1)]; /* dQ3/dx */
            const float q2  = (j == 0) ? Q2[idx] : Q2[idx] - Q2[(j-1)*dimX + i]; /* dQ2/dy */
            const float q22 = (j == 0) ? Q3[idx] : Q3[idx] - Q3[(j-1)*dimX + i]; /* dQ3/dy */
            V1[idx] += tau*(P1[idx] + (q1 + q22));
            V2[idx] += tau*(P2[idx] + (q2 + q11));
        }
    }
    return 1;
}
|
binary_move_generator.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_BINARY_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_BINARY_MOVE_GENERATOR_H__
#include "abstract_move_generator.h"
namespace printemps {
namespace neighborhood {
/*****************************************************************************/
template <class T_Variable, class T_Expression>
class BinaryMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
   public:
    /*************************************************************************/
    BinaryMoveGenerator(void) {
        /// nothing to do
    }
    /*************************************************************************/
    virtual ~BinaryMoveGenerator(void) {
        /// nothing to do
    }
    /*************************************************************************/
    /// Builds one "flip" move per mutable binary variable and installs the
    /// updater lambda that refreshes the target value of each move.
    /// NOTE(review): the variable pointers are captured by value into the
    /// updater and assumed to outlive this generator — confirm with callers.
    void setup(
        const std::vector<model_component::Variable<T_Variable, T_Expression> *>
            &a_RAW_VARIABLE_PTRS) {
        /**
         * "Flip" move for binary variables:
         * e.g) binary variable x \in {0, 1}
         *  move: {(x = 1)} (if x = 0)
         *        {(x = 0)} (if x = 1)
         */

        /**
         * Extract mutable variables.
         */
        auto mutable_variable_ptrs =
            extract_mutable_variable_ptrs(a_RAW_VARIABLE_PTRS);

        /**
         * Setup move objects. The alteration's target value (second member)
         * is a placeholder here; the updater below overwrites it with
         * 1 - current value on every call.
         */
        const int VARIABLES_SIZE = mutable_variable_ptrs.size();

        this->m_moves.resize(VARIABLES_SIZE);
        this->m_flags.resize(VARIABLES_SIZE);

        for (auto i = 0; i < VARIABLES_SIZE; i++) {
            this->m_moves[i].sense = MoveSense::Binary;
            this->m_moves[i].related_constraint_ptrs =
                mutable_variable_ptrs[i]->related_constraint_ptrs();
            this->m_moves[i].alterations.emplace_back(mutable_variable_ptrs[i],
                                                      0);
            this->m_moves[i].is_univariable_move          = true;
            this->m_moves[i].is_special_neighborhood_move = false;
            this->m_moves[i].is_available                 = true;
            this->m_moves[i].overlap_rate                 = 0.0;
        }

        /**
         * Setup move updater: marks move i enabled (flag 1) when it is
         * accepted by the filter arguments, and sets its flip target.
         * The loop may run in parallel when a_IS_ENABLED_PARALLEL is true.
         */
        auto move_updater =  //
            [this, mutable_variable_ptrs, VARIABLES_SIZE](
                auto *                      a_moves,                      //
                auto *                      a_flags,                      //
                const bool                  a_ACCEPT_ALL,                 //
                const bool                  a_ACCEPT_OBJECTIVE_IMPROVABLE,  //
                const bool                  a_ACCEPT_FEASIBILITY_IMPROVABLE,  //
                [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < VARIABLES_SIZE; i++) {
                    if (a_ACCEPT_ALL ||
                        (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                         mutable_variable_ptrs[i]->is_objective_improvable()) ||
                        (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                         mutable_variable_ptrs[i]
                             ->is_feasibility_improvable())) {
                        /* flip: 0 -> 1, 1 -> 0 */
                        (*a_moves)[i].alterations.front().second =
                            1 - mutable_variable_ptrs[i]->value();
                        (*a_flags)[i] = 1;
                    } else {
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
} // namespace neighborhood
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
enq_deq_pairs.c | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
* See COPYRIGHT in top-level directory.
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "zmtest_absqueue.h"
#define TEST_NELEMTS 1000
/*-------------------------------------------------------------------------
* Function: run
*
* Purpose: Test the correctness of queue operations by counting the number
* of dequeued elements to the expected number
*
* Return: Success: 0
* Failure: 1
*-------------------------------------------------------------------------
*/
/* Measure enqueue/dequeue throughput for producer/consumer thread pairs.
 * Even-ranked thread 2k produces TEST_NELEMTS/(nthreads/2) elements into
 * queue k; odd-ranked thread 2k+1 consumes them. Prints ops/s per thread
 * count.
 *
 * FIX: the queue array was leaked (no free) and the malloc result was not
 * checked; also removed the shadowed `elem` declaration. */
static inline void run() {
    int max_threads = omp_get_max_threads();
    /* one queue per producer/consumer pair */
    zm_absqueue_t* queues = malloc(max_threads/2 * sizeof(zm_absqueue_t));
    if (queues == NULL) {
        fprintf(stderr, "failed to allocate %d queues\n", max_threads/2);
        return;
    }
    double t1, t2;
    printf("#threads \t throughput ops/s\n");
    int nthreads;
    for (nthreads = 2; nthreads <= max_threads; nthreads += 2) {
        int i;
        for (i = 0; i < nthreads/2; i++)
            zm_absqueue_init(&queues[i]);
        int nelem_enq, nelem_deq;
        nelem_enq = TEST_NELEMTS/(nthreads/2); /* split the work across pairs */
        nelem_deq = nelem_enq;
        t1 = omp_get_wtime();
        #pragma omp parallel num_threads(nthreads)
        {
            int tid, producer_b, qidx;
#if defined(ZMTEST_ALLOC_QELEM)
            int *input;
#else
            int input = 1;
#endif
            tid = omp_get_thread_num();
            producer_b = (tid % 2 == 0); /* even ranks produce, odd consume */
            qidx = tid/2;                /* each pair shares queue tid/2 */
            unsigned deq_count = 0;
            if (producer_b) { /* producer */
                int k;
                for (k = 0; k < nelem_enq; k++) {
#if defined(ZMTEST_ALLOC_QELEM)
                    input = malloc(sizeof *input);
                    *input = 1;
                    zm_absqueue_enqueue(&queues[qidx], (void*) input);
#else
                    zm_absqueue_enqueue(&queues[qidx], (void*) &input);
#endif
                }
            } else { /* consumer: spin until every expected element arrived */
                while (deq_count < (unsigned)nelem_deq) {
                    int* elem = NULL;
                    zm_absqueue_dequeue(&queues[qidx], (void**)&elem);
                    if ((elem != NULL) && (*elem == 1)) {
                        deq_count++;
#if defined(ZMTEST_ALLOC_QELEM)
                        free(elem);
#endif
                    }
                }
            }
        }
        t2 = omp_get_wtime();
        printf("%d \t %lf\n", nthreads, (double)nelem_deq*nthreads/(t2-t1));
    }
    free(queues); /* was leaked in the original */
} /* end run() */
/* Entry point: arguments are accepted but unused (kept for the harness).
 * FIX: return success explicitly instead of falling off the end. */
int main(int argc, char **argv) {
    (void)argc; /* unused */
    (void)argv; /* unused */
    run();
    return 0;
} /* end main() */
|
GrB_Vector_nvals.c | //------------------------------------------------------------------------------
// GrB_Vector_nvals: number of entries in a sparse vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GrB_Vector_nvals   // get the number of entries in a vector
(
    GrB_Index *nvals,       // number of entries
    const GrB_Vector v      // vector to query
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GrB_Vector_nvals (&nvals, v)") ;
    GB_BURBLE_START ("GrB_Vector_nvals") ;
    GB_RETURN_IF_NULL_OR_FAULTY (v) ;
    ASSERT (GB_VECTOR_OK (v)) ;

    //--------------------------------------------------------------------------
    // get the number of entries
    //--------------------------------------------------------------------------

    // a GrB_Vector is a GrB_Matrix with a single column, so the matrix
    // routine is reused via a cast
    GrB_Info info = GB_nvals (nvals, (GrB_Matrix) v, Context) ;
    GB_BURBLE_END ;
    // NOTE(review): flush presumably publishes the result to other threads
    // before returning — confirm against the GraphBLAS threading model
    #pragma omp flush
    return (info) ;
}
|
displacement_criteria.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_DISPLACEMENT_CRITERIA )
#define KRATOS_DISPLACEMENT_CRITERIA
/* System includes */
/* External includes */
/* Project includes */
#include "includes/model_part.h"
#include "includes/define.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class DisplacementCriteria
* @ingroup KratosCore
* @brief This is a convergence criteria that employes the increment on the solution as criteria
* @details The reactions from the RHS are not computed in the solution
* @author Riccardo Rossi
*/
template<class TSparseSpace,
         class TDenseSpace
         >
class DisplacementCriteria
    : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( DisplacementCriteria );

    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef std::size_t IndexType;

    typedef std::size_t SizeType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor from settings. Reads the absolute and relative tolerances,
     * falling back to defaults (1.0e-9 / 1.0e-4) with a warning when neither
     * the displacement-specific nor the generic key is present.
     */
    explicit DisplacementCriteria(Kratos::Parameters Settings)
        : BaseType()
    {
        if (Settings.Has("displacement_absolute_tolerance")) {
            mAlwaysConvergedNorm = Settings["displacement_absolute_tolerance"].GetDouble();
        } else if (Settings.Has("absolute_tolerance")) {
            mAlwaysConvergedNorm = Settings["absolute_tolerance"].GetDouble();
        } else {
            KRATOS_WARNING("DisplacementCriteria") << "displacement_absolute_tolerance or absolute_tolerance not defined on settings. Using default 1.0e-9" << std::endl;
            mAlwaysConvergedNorm = 1.0e-9;
        }
        if (Settings.Has("displacement_relative_tolerance")) {
            mRatioTolerance = Settings["displacement_relative_tolerance"].GetDouble();
        } else if (Settings.Has("relative_tolerance")) {
            mRatioTolerance = Settings["relative_tolerance"].GetDouble();
        } else {
            KRATOS_WARNING("DisplacementCriteria") << "displacement_relative_tolerance or relative_tolerance not defined on settings. Using default 1.0e-4" << std::endl;
            mRatioTolerance = 1.0e-4;
        }
    }

    /** Constructor from explicit tolerances.
     * @param NewRatioTolerance relative (ratio) tolerance
     * @param AlwaysConvergedNorm absolute tolerance
     */
    explicit DisplacementCriteria(
        TDataType NewRatioTolerance,
        TDataType AlwaysConvergedNorm)
        : BaseType(),
          mRatioTolerance(NewRatioTolerance),
          mAlwaysConvergedNorm(AlwaysConvergedNorm)
    {
    }

    /** Copy constructor.
     */
    explicit DisplacementCriteria( DisplacementCriteria const& rOther )
        :BaseType(rOther)
        ,mRatioTolerance(rOther.mRatioTolerance)
        ,mAlwaysConvergedNorm(rOther.mAlwaysConvergedNorm)
        ,mReferenceDispNorm(rOther.mReferenceDispNorm)
    {
    }

    /** Destructor.
     */
    ~DisplacementCriteria() override {}

    ///@}
    ///@name Operators
    ///@{

    /**
     * Compute relative and absolute error.
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual + reactions)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& A,
        const TSystemVectorType& Dx,
        const TSystemVectorType& b
        ) override
    {
        const TDataType approx_zero_tolerance = std::numeric_limits<TDataType>::epsilon();
        const SizeType size_Dx = Dx.size();

        if (size_Dx != 0) { //if we are solving for something
            SizeType size_solution;
            TDataType final_correction_norm = CalculateFinalCorrectionNorm(size_solution, rDofSet, Dx);

            TDataType ratio = 0.0;

            CalculateReferenceNorm(rDofSet);
            if (mReferenceDispNorm < approx_zero_tolerance) {
                // degenerate reference (e.g. all-zero solution): fall back to
                // the correction norm so the ratio below evaluates to 1
                KRATOS_WARNING("DisplacementCriteria") << "NaN norm is detected. Setting reference to convergence criteria" << std::endl;
                mReferenceDispNorm = final_correction_norm;
            }

            if (final_correction_norm < approx_zero_tolerance) {
                ratio = 0.0;
            } else {
                ratio = final_correction_norm/mReferenceDispNorm;
            }

            const TDataType float_size_solution = static_cast<TDataType>(size_solution);
            const TDataType absolute_norm = (final_correction_norm/std::sqrt(float_size_solution));

            KRATOS_INFO_IF("DISPLACEMENT CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Obtained ratio = " << ratio << "; Expected ratio = " << mRatioTolerance << "; Absolute norm = " << absolute_norm << "; Expected norm =  " << mAlwaysConvergedNorm << "]" << std::endl;

            rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio;
            rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm;

            // converged when EITHER the relative or the absolute check passes
            if ( ratio <= mRatioTolerance || absolute_norm<mAlwaysConvergedNorm ) {
                KRATOS_INFO_IF("DISPLACEMENT CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << "Convergence is achieved" << std::endl;
                return true;
            } else {
                return false;
            }
        } else { //in this case all the displacements are imposed!
            return true;
        }
    }

    /**
     * This function initialize the convergence criteria
     * @param rModelPart Reference to the ModelPart containing the problem. (unused)
     */
    void Initialize(
        ModelPart& rModelPart
        ) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;
    }

    /**
     * This function initializes the solution step
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual + reactions)
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& A,
        const TSystemVectorType& Dx,
        const TSystemVectorType& b
        ) override
    {
        BaseType::InitializeSolutionStep(rModelPart, rDofSet, A, Dx, b);
    }

    /**
     * This function finalizes the solution step
     * @param rModelPart Reference to the ModelPart containing the problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual + reactions)
     */
    void FinalizeSolutionStep(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        const TSystemMatrixType& A,
        const TSystemVectorType& Dx,
        const TSystemVectorType& b
        ) override
    {
        BaseType::FinalizeSolutionStep(rModelPart, rDofSet, A, Dx, b);
    }

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "DisplacementCriteria";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    TDataType mRatioTolerance;      /// The ratio threshold for the norm of the residual
    TDataType mAlwaysConvergedNorm; /// The absolute value threshold for the norm of the residual
    TDataType mReferenceDispNorm;   /// The norm at the beginning of the iterations

    ///@}
    ///@name Private Operators
    ///@{

    /**
     * @brief This method computes the reference norm
     * @details It checks if the dof is fixed
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     */
    void CalculateReferenceNorm(DofsArrayType& rDofSet)
    {
        TDataType reference_disp_norm = TDataType();

        // FIX: the temporary holding the dof value must be declared inside
        // the loop body. The original declared a single function-scope
        // `dof_value`, which was shared by all OpenMP threads and raced,
        // producing a nondeterministic (wrong) reference norm.
        #pragma omp parallel for reduction(+:reference_disp_norm)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = rDofSet.begin() + i;

            if (it_dof->IsFree()) {
                const TDataType dof_value = it_dof->GetSolutionStepValue();
                reference_disp_norm += dof_value * dof_value;
            }
        }

        mReferenceDispNorm = std::sqrt(reference_disp_norm);
    }

    /**
     * @brief This method computes the final norm
     * @details It checks if the dof is fixed
     * @param rDofNum The number of DoFs
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param Dx Vector of results (variations on nodal variables)
     */
    TDataType CalculateFinalCorrectionNorm(
        SizeType& rDofNum,
        DofsArrayType& rDofSet,
        const TSystemVectorType& Dx
        )
    {
        // Initialize
        TDataType final_correction_norm = TDataType();
        SizeType dof_num = 0;

        // Loop over Dofs (all temporaries are loop-local, so no race here)
        #pragma omp parallel for reduction(+:final_correction_norm,dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = rDofSet.begin() + i;

            IndexType dof_id;
            TDataType variation_dof_value;

            if (it_dof->IsFree()) {
                dof_id = it_dof->EquationId();
                variation_dof_value = Dx[dof_id];
                final_correction_norm += std::pow(variation_dof_value, 2);
                dof_num++;
            }
        }

        rDofNum = dof_num;
        return std::sqrt(final_correction_norm);
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class DisplacementCriteria */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_DISPLACEMENT_CRITERIA defined */
|
mysql_fmt_plug.c | /* MYSQL_half_fmt.c
*
* Copyright (c) 2008 by <earthquake at rycon.hu>
*
* John the ripper MYSQL-fast module
*
*
 * Note: only the first 8 bytes of the MySQL hash can be
 * checked quickly; the remaining bytes depend on the
 * first 8. Passwords longer than 9-10 characters may
 * collide in the first 8 bytes, so the full hash has
 * to be checked as well.
*
* Unbelievable good optimization by Péter Kasza
*
* http://rycon.hu/
*
* OpenMP support and other assorted hacks by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MYSQL_fast;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MYSQL_fast);
#else
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 81920
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"
#define FORMAT_LABEL "mysql"
#define FORMAT_NAME "MySQL pre-4.1"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 16
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 8
/* Self-test vectors: { ciphertext, plaintext } pairs, NULL-terminated.
 * Note the two "hacktivity" entries share one hash: the algorithm skips
 * spaces and tabs in the password. */
static struct fmt_tests tests[] = {
	// ciphertext, plaintext
	{"445ff82636a7ba59", "probe"},
	{"60671c896665c3fa", "a"},
	{"1acbed4a27b20da3", "hash"},
	{"77ff75006118bab8", "hacker"},
	{"1b38cd9c2f809809", "hacktivity2008"},
	{"1b38cd9c2f809809", "hacktivity 2008"},
	{"6fc81597422015a8", "johnmodule"},
	{"30f098972cc8924d", "http://guh.nu"},
	{"3fc56f6037218993", "Andrew Hintz"},
	{"697a7de87c5390b2", "drew"},
	{"1eb71cf460712b3e", "http://4tphi.net"},
	{"28ff8d49159ffbaf", "http://violating.us"},
	{"5d2e19393cc5ef67", "password"},
	{"5030573512345671", ""},
	{NULL}
};

/* Candidate plaintext buffers and the 32-bit truncated hash results,
 * both allocated in init() for max_keys_per_crypt candidates. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
/* One-time format setup: scale the per-call key batch with the OpenMP
 * thread count, then allocate the key/hash arrays at the final size. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* each thread gets OMP_SCALE keys per crypt_all() call; min is
	 * scaled by the thread count only, max by threads * OMP_SCALE */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_alloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
	crypt_key = mem_alloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
}
/* Accept a ciphertext only if it is exactly CIPHERTEXT_LENGTH hex digits
 * (atoi16 maps non-hex characters to values above 15). Returns 1/0. */
static int valid(char* ciphertext, struct fmt_main *self)
{
	size_t i, len = strlen(ciphertext);

	if (len != CIPHERTEXT_LENGTH)
		return 0;

	for (i = 0; i < len; i++) {
		if (atoi16[ARCH_INDEX(ciphertext[i])] > 15)
			return 0;
	}

	return 1;
}
/* Canonicalize a ciphertext: fixed-length, lowercased copy in a static
 * buffer (valid until the next call; single-threaded use only). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	memcpy(out, ciphertext, CIPHERTEXT_LENGTH);
	out[CIPHERTEXT_LENGTH] = '\0';
	strlwr(out);
	return out;
}
/* Decode up to `size` bytes of hex ciphertext into a static buffer and
 * return it. On little-endian hosts the bytes of each 32-bit word are
 * stored reversed — the ((i & ~3U) | (3 - (i & 3))) index flips byte
 * order within each 4-byte group — so the resulting words compare equal
 * to the values computed by the hash. Buffer is valid until next call. */
static void *get_binary_size(char *ciphertext, int size)
{
	/* maybe bigger than BINARY_SIZE for use from cmp_exact() */
	static ARCH_WORD_32 buff_[8];
	unsigned char *buff = (unsigned char *)buff_;
	unsigned int i;

	for (i = 0; i < size; i++) {
#if ARCH_LITTLE_ENDIAN
		buff[(i & ~3U) | (3 - (i & 3))] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
#else
		buff[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
#endif
	}

	return buff;
}
/* Decode just the first BINARY_SIZE bytes — enough for the fast compares. */
static void *get_binary(char *ciphertext)
{
	return get_binary_size(ciphertext, BINARY_SIZE);
}
/* Store a candidate plaintext, truncated to PLAINTEXT_LENGTH and
 * NUL-terminated. */
static void set_key(char* key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate plaintext for this index. */
static char* get_key(int index)
{
	return saved_key[index];
}
/* Fast check: compare only the first 32-bit word of the binary hash;
 * cmp_exact() later verifies the full hash. */
static int cmp_one(void* binary, int index)
{
	return *(ARCH_WORD_32 *)binary == crypt_key[index][0];
}
/* Quick scan: does ANY of the `count` computed hashes match the 32-bit
 * binary? Parallel variant sets a flag with atomic OR (no early exit);
 * the serial variant returns on first match. */
static int cmp_all(void* binary, int count)
{
	int i;

#ifdef _OPENMP
	int retval = 0;
#pragma omp parallel for default(none) private(i) shared(count, binary, crypt_key, retval)
	for (i = 0; i < count; i++)
		if (*(ARCH_WORD_32 *)binary == crypt_key[i][0])
			/* atomic OR avoids a race on the shared flag */
#pragma omp atomic
			retval |= 1;
	return retval;
#else
	for (i = 0; i < count; i++)
		if (*(ARCH_WORD_32 *)binary == crypt_key[i][0])
			return 1;
	return 0;
#endif
}
/* Recompute the full 64-bit MySQL pre-4.1 hash of the candidate and
 * compare both 31-bit halves with the complete ciphertext (cmp_one only
 * checked the first half, which can collide for long passwords). */
static int cmp_exact(char* source, int index)
{
	register ARCH_WORD_32 nr = 1345345333, add = 7, nr2 = 0x12345671;
	register ARCH_WORD_32 tmp;
	unsigned char *p;

	p = (unsigned char *)saved_key[index];
	for (; *p; p++) {
		/* the algorithm ignores spaces and tabs in the password */
		if (*p == ' ' || *p == '\t')
			continue;

		tmp = (ARCH_WORD_32)*p;
		nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
		nr2 += (nr2 << 8) ^ nr;
		add += tmp;
	}

#if 0
	{
		char ctmp[CIPHERTEXT_LENGTH + 1];
		sprintf(ctmp, "%08x%08x", nr & (((ARCH_WORD_32)1 << 31) - 1), nr2 & (((ARCH_WORD_32)1 << 31) - 1));
		return !memcmp(source, ctmp, CIPHERTEXT_LENGTH);
	}
#else
	{
		/* both halves are masked to 31 bits, exactly as MySQL does */
		ARCH_WORD_32 *binary = get_binary_size(source, 8);
		return
			binary[0] == (nr & (((ARCH_WORD_32)1 << 31) - 1)) &&
			binary[1] == (nr2 & (((ARCH_WORD_32)1 << 31) - 1));
	}
#endif
}
/* Compute the MySQL pre-4.1 hash of every queued key, keeping the low
 * 31 bits of the first hash word in crypt_key[] for the fast compares.
 * The first character is peeled out of the loop so the seed constants
 * can be folded; spaces/tabs are skipped per the original algorithm. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i = 0;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(count, saved_key, crypt_key)
#endif
#if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP)
	for (i = 0; i < count; i++)
#endif
	{
		unsigned char *p = (unsigned char *)saved_key[i];

		if (*p) {
			ARCH_WORD_32 nr, add;
			ARCH_WORD_32 tmp;

			/* leading spaces/tabs are ignored by the algorithm */
			while (*p == ' ' || *p == '\t')
				p++;

			/* first character: seed constants folded in directly */
			tmp = (ARCH_WORD_32) (unsigned char) *p++;
			nr = 1345345333 ^ ((((1345345333 & 63) + 7) * tmp) + (1345345333U << 8));
			add = 7 + tmp;

			for (; *p; p++) {
				if (*p == ' ' || *p == '\t')
					continue;

				tmp = (ARCH_WORD_32) (unsigned char) *p;
				nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
				add += tmp;
			}

			/* MySQL keeps only 31 bits per word */
			crypt_key[i][0] = (nr & (((ARCH_WORD_32)1 << 31) - 1));
#if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP)
			continue;
#else
			return count;
#endif
		}

		/* empty key: hash is just the masked seed */
		crypt_key[i][0] = (1345345333 & (((ARCH_WORD_32)1 << 31) - 1));
	}

	return count;
}
/* Hash-table bucket helpers: return the low 4, 8, 12, 16, 20, 24 and 27
 * bits of the computed hash, matching the core's seven table sizes. */
static int get_hash_0(int index)
{
	return crypt_key[index][0] & 0xF;
}

static int get_hash_1(int index)
{
	return crypt_key[index][0] & 0xFF;
}

static int get_hash_2(int index)
{
	return crypt_key[index][0] & 0xFFF;
}

static int get_hash_3(int index)
{
	return crypt_key[index][0] & 0xFFFF;
}

static int get_hash_4(int index)
{
	return crypt_key[index][0] & 0xFFFFF;
}

static int get_hash_5(int index)
{
	return crypt_key[index][0] & 0xFFFFFF;
}

static int get_hash_6(int index)
{
	return crypt_key[index][0] & 0x7FFFFFF;
}
/* Format descriptor registered with the John the Ripper core:
 * parameter block first, then the method table (order is fixed by
 * struct fmt_main). Unused hooks use the fmt_default_* stubs. */
struct fmt_main fmt_MYSQL_fast =
{
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.